server.go 71 KB

1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950515253545556575859606162636465666768697071727374757677787980818283848586878889909192939495969798991001011021031041051061071081091101111121131141151161171181191201211221231241251261271281291301311321331341351361371381391401411421431441451461471481491501511521531541551561571581591601611621631641651661671681691701711721731741751761771781791801811821831841851861871881891901911921931941951961971981992002012022032042052062072082092102112122132142152162172182192202212222232242252262272282292302312322332342352362372382392402412422432442452462472482492502512522532542552562572582592602612622632642652662672682692702712722732742752762772782792802812822832842852862872882892902912922932942952962972982993003013023033043053063073083093103113123133143153163173183193203213223233243253263273283293303313323333343353363373383393403413423433443453463473483493503513523533543553563573583593603613623633643653663673683693703713723733743753763773783793803813823833843853863873883893903913923933943953963973983994004014024034044054064074084094104114124134144154164174184194204214224234244254264274284294304314324334344354364374384394404414424434444454464474484494504514524534544554564574584594604614624634644654664674684694704714724734744754764774784794804814824834844854864874884894904914924934944954964974984995005015025035045055065075085095105115125135145155165175185195205215225235245255265275285295305315325335345355365375385395405415425435445455465475485495505515525535545555565575585595605615625635645655665675685695705715725735745755765775785795805815825835845855865875885895905915925935945955965975985996006016026036046056066076086096106116126136146156166176186196206216226236246256266276286296306316326336346356366376386396406416426436446456466476486496506516526536546556566576586596606616626636646656666676686696706716726736746756766776786796806816826836846856866876886896906916926936946956966976986997007017027037047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603160416051606160716081609161016111612161316141615161616171618161916201621162216231624162516261627162816291630163116321633163416351636163716381639164016411642164316441645164616471648164916501651165216531654165516561657165816591660166116621663166416651666166716681669167016711672167316741675167616771678167916801681168216831684168516861687168816891690169116921693169416951696169716981699170017011702170317041705170617071708170917101711171217131714171517161717171817191720172117221723172417251726172717281729173017311732173317341735173617371738173917401741174217431744174517461747174817491750175117521753175417551756175717581759176017611762176317641765176617671768176917701771177217731774177517761777177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900190119021903190419051906190719081909191019111912191319141915191619171918191919201921192219231924192519261927192819291930193119321933193419351936193719381939194019411942194319441945194619471948194919501951195219531954195519561957195819591960196119621963196419651966196719681969197019711972197319741975197619771978197919801981198219831984198519861987198819891990199119921993199419951996199719981999200020012002200320042005200620072008200920102011201220132014201520162017201820192020202120222023202420252026202720282029203020312032203320342035203620372038203920402041204220432044204520462047204820492050205120522053205420552056205720582059206020612062206320642065206620672068206920702071207220732074207520762077207820792080208120822083208420852086208720882089209020912092209320942095209620972098209921002101210221032104210521062107210821092110211121122113211421152116211721182119212021212122212321242125212621272128212921302131213221332134213521362137213821392140214121422143214421452146214721482149215021512152215321542155215621572158215921602161216221632164216521662167216821692170217121722173217421752176217721782179218021812182218321842185218621872188218921902191219221932194219521962197219821992200220122022203220422052206220722082209221022112212221322142215221622172218221922202221222222232224222522262227222822292230223122322233223422352236223722382239224022412242224322442245224622472248224922502251225222532254225522562257225822592260226122622263226422652266226722682269227022712272227322742275227622772278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503
  1. // Copyright 2015 The etcd Authors
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // http://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
  13. // limitations under the License.
  14. package etcdserver
  15. import (
  16. "context"
  17. "encoding/json"
  18. "expvar"
  19. "fmt"
  20. "math"
  21. "math/rand"
  22. "net/http"
  23. "os"
  24. "path"
  25. "regexp"
  26. "sync"
  27. "sync/atomic"
  28. "time"
  29. "go.etcd.io/etcd/v3/auth"
  30. "go.etcd.io/etcd/v3/etcdserver/api"
  31. "go.etcd.io/etcd/v3/etcdserver/api/membership"
  32. "go.etcd.io/etcd/v3/etcdserver/api/rafthttp"
  33. "go.etcd.io/etcd/v3/etcdserver/api/snap"
  34. "go.etcd.io/etcd/v3/etcdserver/api/v2discovery"
  35. "go.etcd.io/etcd/v3/etcdserver/api/v2http/httptypes"
  36. stats "go.etcd.io/etcd/v3/etcdserver/api/v2stats"
  37. "go.etcd.io/etcd/v3/etcdserver/api/v2store"
  38. "go.etcd.io/etcd/v3/etcdserver/api/v3alarm"
  39. "go.etcd.io/etcd/v3/etcdserver/api/v3compactor"
  40. pb "go.etcd.io/etcd/v3/etcdserver/etcdserverpb"
  41. "go.etcd.io/etcd/v3/lease"
  42. "go.etcd.io/etcd/v3/lease/leasehttp"
  43. "go.etcd.io/etcd/v3/mvcc"
  44. "go.etcd.io/etcd/v3/mvcc/backend"
  45. "go.etcd.io/etcd/v3/pkg/fileutil"
  46. "go.etcd.io/etcd/v3/pkg/idutil"
  47. "go.etcd.io/etcd/v3/pkg/pbutil"
  48. "go.etcd.io/etcd/v3/pkg/runtime"
  49. "go.etcd.io/etcd/v3/pkg/schedule"
  50. "go.etcd.io/etcd/v3/pkg/types"
  51. "go.etcd.io/etcd/v3/pkg/wait"
  52. "go.etcd.io/etcd/v3/raft"
  53. "go.etcd.io/etcd/v3/raft/raftpb"
  54. "go.etcd.io/etcd/v3/version"
  55. "go.etcd.io/etcd/v3/wal"
  56. "github.com/coreos/go-semver/semver"
  57. "github.com/coreos/pkg/capnslog"
  58. humanize "github.com/dustin/go-humanize"
  59. "github.com/prometheus/client_golang/prometheus"
  60. "go.uber.org/zap"
  61. )
  62. const (
  63. DefaultSnapshotCount = 100000
  64. // DefaultSnapshotCatchUpEntries is the number of entries for a slow follower
  65. // to catch-up after compacting the raft storage entries.
  66. // We expect the follower has a millisecond level latency with the leader.
  67. // The max throughput is around 10K. Keep a 5K entries is enough for helping
  68. // follower to catch up.
  69. DefaultSnapshotCatchUpEntries uint64 = 5000
  70. StoreClusterPrefix = "/0"
  71. StoreKeysPrefix = "/1"
  72. // HealthInterval is the minimum time the cluster should be healthy
  73. // before accepting add member requests.
  74. HealthInterval = 5 * time.Second
  75. purgeFileInterval = 30 * time.Second
  76. // monitorVersionInterval should be smaller than the timeout
  77. // on the connection. Or we will not be able to reuse the connection
  78. // (since it will timeout).
  79. monitorVersionInterval = rafthttp.ConnWriteTimeout - time.Second
  80. // max number of in-flight snapshot messages etcdserver allows to have
  81. // This number is more than enough for most clusters with 5 machines.
  82. maxInFlightMsgSnap = 16
  83. releaseDelayAfterSnapshot = 30 * time.Second
  84. // maxPendingRevokes is the maximum number of outstanding expired lease revocations.
  85. maxPendingRevokes = 16
  86. recommendedMaxRequestBytes = 10 * 1024 * 1024
  87. )
  88. var (
  89. plog = capnslog.NewPackageLogger("go.etcd.io/etcd", "etcdserver")
  90. storeMemberAttributeRegexp = regexp.MustCompile(path.Join(membership.StoreMembersPrefix, "[[:xdigit:]]{1,16}", "attributes"))
  91. )
  92. func init() {
  93. rand.Seed(time.Now().UnixNano())
  94. expvar.Publish(
  95. "file_descriptor_limit",
  96. expvar.Func(
  97. func() interface{} {
  98. n, _ := runtime.FDLimit()
  99. return n
  100. },
  101. ),
  102. )
  103. }
  104. type Response struct {
  105. Term uint64
  106. Index uint64
  107. Event *v2store.Event
  108. Watcher v2store.Watcher
  109. Err error
  110. }
  111. type ServerV2 interface {
  112. Server
  113. Leader() types.ID
  114. // Do takes a V2 request and attempts to fulfill it, returning a Response.
  115. Do(ctx context.Context, r pb.Request) (Response, error)
  116. stats.Stats
  117. ClientCertAuthEnabled() bool
  118. }
  119. type ServerV3 interface {
  120. Server
  121. RaftStatusGetter
  122. }
  123. func (s *EtcdServer) ClientCertAuthEnabled() bool { return s.Cfg.ClientCertAuthEnabled }
  124. type Server interface {
  125. // AddMember attempts to add a member into the cluster. It will return
  126. // ErrIDRemoved if member ID is removed from the cluster, or return
  127. // ErrIDExists if member ID exists in the cluster.
  128. AddMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error)
  129. // RemoveMember attempts to remove a member from the cluster. It will
  130. // return ErrIDRemoved if member ID is removed from the cluster, or return
  131. // ErrIDNotFound if member ID is not in the cluster.
  132. RemoveMember(ctx context.Context, id uint64) ([]*membership.Member, error)
  133. // UpdateMember attempts to update an existing member in the cluster. It will
  134. // return ErrIDNotFound if the member ID does not exist.
  135. UpdateMember(ctx context.Context, updateMemb membership.Member) ([]*membership.Member, error)
  136. // PromoteMember attempts to promote a non-voting node to a voting node. It will
  137. // return ErrIDNotFound if the member ID does not exist.
  138. // return ErrLearnerNotReady if the member are not ready.
  139. // return ErrMemberNotLearner if the member is not a learner.
  140. PromoteMember(ctx context.Context, id uint64) ([]*membership.Member, error)
  141. // ClusterVersion is the cluster-wide minimum major.minor version.
  142. // Cluster version is set to the min version that an etcd member is
  143. // compatible with when first bootstrap.
  144. //
  145. // ClusterVersion is nil until the cluster is bootstrapped (has a quorum).
  146. //
  147. // During a rolling upgrades, the ClusterVersion will be updated
  148. // automatically after a sync. (5 second by default)
  149. //
  150. // The API/raft component can utilize ClusterVersion to determine if
  151. // it can accept a client request or a raft RPC.
  152. // NOTE: ClusterVersion might be nil when etcd 2.1 works with etcd 2.0 and
  153. // the leader is etcd 2.0. etcd 2.0 leader will not update clusterVersion since
  154. // this feature is introduced post 2.0.
  155. ClusterVersion() *semver.Version
  156. Cluster() api.Cluster
  157. Alarms() []*pb.AlarmMember
  158. }
  159. // EtcdServer is the production implementation of the Server interface
  160. type EtcdServer struct {
  161. // inflightSnapshots holds count the number of snapshots currently inflight.
  162. inflightSnapshots int64 // must use atomic operations to access; keep 64-bit aligned.
  163. appliedIndex uint64 // must use atomic operations to access; keep 64-bit aligned.
  164. committedIndex uint64 // must use atomic operations to access; keep 64-bit aligned.
  165. term uint64 // must use atomic operations to access; keep 64-bit aligned.
  166. lead uint64 // must use atomic operations to access; keep 64-bit aligned.
  167. // consistIndex used to hold the offset of current executing entry
  168. // It is initialized to 0 before executing any entry.
  169. consistIndex consistentIndex // must use atomic operations to access; keep 64-bit aligned.
  170. r raftNode // uses 64-bit atomics; keep 64-bit aligned.
  171. readych chan struct{}
  172. Cfg ServerConfig
  173. lgMu *sync.RWMutex
  174. lg *zap.Logger
  175. w wait.Wait
  176. readMu sync.RWMutex
  177. // read routine notifies etcd server that it waits for reading by sending an empty struct to
  178. // readwaitC
  179. readwaitc chan struct{}
  180. // readNotifier is used to notify the read routine that it can process the request
  181. // when there is no error
  182. readNotifier *notifier
  183. // stop signals the run goroutine should shutdown.
  184. stop chan struct{}
  185. // stopping is closed by run goroutine on shutdown.
  186. stopping chan struct{}
  187. // done is closed when all goroutines from start() complete.
  188. done chan struct{}
  189. // leaderChanged is used to notify the linearizable read loop to drop the old read requests.
  190. leaderChanged chan struct{}
  191. leaderChangedMu sync.RWMutex
  192. errorc chan error
  193. id types.ID
  194. attributes membership.Attributes
  195. cluster *membership.RaftCluster
  196. v2store v2store.Store
  197. snapshotter *snap.Snapshotter
  198. applyV2 ApplierV2
  199. // applyV3 is the applier with auth and quotas
  200. applyV3 applierV3
  201. // applyV3Base is the core applier without auth or quotas
  202. applyV3Base applierV3
  203. applyWait wait.WaitTime
  204. kv mvcc.ConsistentWatchableKV
  205. lessor lease.Lessor
  206. bemu sync.Mutex
  207. be backend.Backend
  208. authStore auth.AuthStore
  209. alarmStore *v3alarm.AlarmStore
  210. stats *stats.ServerStats
  211. lstats *stats.LeaderStats
  212. SyncTicker *time.Ticker
  213. // compactor is used to auto-compact the KV.
  214. compactor v3compactor.Compactor
  215. // peerRt used to send requests (version, lease) to peers.
  216. peerRt http.RoundTripper
  217. reqIDGen *idutil.Generator
  218. // forceVersionC is used to force the version monitor loop
  219. // to detect the cluster version immediately.
  220. forceVersionC chan struct{}
  221. // wgMu blocks concurrent waitgroup mutation while server stopping
  222. wgMu sync.RWMutex
  223. // wg is used to wait for the go routines that depends on the server state
  224. // to exit when stopping the server.
  225. wg sync.WaitGroup
  226. // ctx is used for etcd-initiated requests that may need to be canceled
  227. // on etcd server shutdown.
  228. ctx context.Context
  229. cancel context.CancelFunc
  230. leadTimeMu sync.RWMutex
  231. leadElectedTime time.Time
  232. *AccessController
  233. }
  234. // NewServer creates a new EtcdServer from the supplied configuration. The
  235. // configuration is considered static for the lifetime of the EtcdServer.
  236. func NewServer(cfg ServerConfig) (srv *EtcdServer, err error) {
  237. st := v2store.New(StoreClusterPrefix, StoreKeysPrefix)
  238. var (
  239. w *wal.WAL
  240. n raft.Node
  241. s *raft.MemoryStorage
  242. id types.ID
  243. cl *membership.RaftCluster
  244. )
  245. if cfg.MaxRequestBytes > recommendedMaxRequestBytes {
  246. if cfg.Logger != nil {
  247. cfg.Logger.Warn(
  248. "exceeded recommended request limit",
  249. zap.Uint("max-request-bytes", cfg.MaxRequestBytes),
  250. zap.String("max-request-size", humanize.Bytes(uint64(cfg.MaxRequestBytes))),
  251. zap.Int("recommended-request-bytes", recommendedMaxRequestBytes),
  252. zap.String("recommended-request-size", humanize.Bytes(uint64(recommendedMaxRequestBytes))),
  253. )
  254. } else {
  255. plog.Warningf("MaxRequestBytes %v exceeds maximum recommended size %v", cfg.MaxRequestBytes, recommendedMaxRequestBytes)
  256. }
  257. }
  258. if terr := fileutil.TouchDirAll(cfg.DataDir); terr != nil {
  259. return nil, fmt.Errorf("cannot access data directory: %v", terr)
  260. }
  261. haveWAL := wal.Exist(cfg.WALDir())
  262. if err = fileutil.TouchDirAll(cfg.SnapDir()); err != nil {
  263. if cfg.Logger != nil {
  264. cfg.Logger.Fatal(
  265. "failed to create snapshot directory",
  266. zap.String("path", cfg.SnapDir()),
  267. zap.Error(err),
  268. )
  269. } else {
  270. plog.Fatalf("create snapshot directory error: %v", err)
  271. }
  272. }
  273. ss := snap.New(cfg.Logger, cfg.SnapDir())
  274. bepath := cfg.backendPath()
  275. beExist := fileutil.Exist(bepath)
  276. be := openBackend(cfg)
  277. defer func() {
  278. if err != nil {
  279. be.Close()
  280. }
  281. }()
  282. prt, err := rafthttp.NewRoundTripper(cfg.PeerTLSInfo, cfg.peerDialTimeout())
  283. if err != nil {
  284. return nil, err
  285. }
  286. var (
  287. remotes []*membership.Member
  288. snapshot *raftpb.Snapshot
  289. )
  290. switch {
  291. case !haveWAL && !cfg.NewCluster:
  292. if err = cfg.VerifyJoinExisting(); err != nil {
  293. return nil, err
  294. }
  295. cl, err = membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, cfg.InitialPeerURLsMap)
  296. if err != nil {
  297. return nil, err
  298. }
  299. existingCluster, gerr := GetClusterFromRemotePeers(cfg.Logger, getRemotePeerURLs(cl, cfg.Name), prt)
  300. if gerr != nil {
  301. return nil, fmt.Errorf("cannot fetch cluster info from peer urls: %v", gerr)
  302. }
  303. if err = membership.ValidateClusterAndAssignIDs(cfg.Logger, cl, existingCluster); err != nil {
  304. return nil, fmt.Errorf("error validating peerURLs %s: %v", existingCluster, err)
  305. }
  306. if !isCompatibleWithCluster(cfg.Logger, cl, cl.MemberByName(cfg.Name).ID, prt) {
  307. return nil, fmt.Errorf("incompatible with current running cluster")
  308. }
  309. remotes = existingCluster.Members()
  310. cl.SetID(types.ID(0), existingCluster.ID())
  311. cl.SetStore(st)
  312. cl.SetBackend(be)
  313. id, n, s, w = startNode(cfg, cl, nil)
  314. cl.SetID(id, existingCluster.ID())
  315. case !haveWAL && cfg.NewCluster:
  316. if err = cfg.VerifyBootstrap(); err != nil {
  317. return nil, err
  318. }
  319. cl, err = membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, cfg.InitialPeerURLsMap)
  320. if err != nil {
  321. return nil, err
  322. }
  323. m := cl.MemberByName(cfg.Name)
  324. if isMemberBootstrapped(cfg.Logger, cl, cfg.Name, prt, cfg.bootstrapTimeout()) {
  325. return nil, fmt.Errorf("member %s has already been bootstrapped", m.ID)
  326. }
  327. if cfg.ShouldDiscover() {
  328. var str string
  329. str, err = v2discovery.JoinCluster(cfg.Logger, cfg.DiscoveryURL, cfg.DiscoveryProxy, m.ID, cfg.InitialPeerURLsMap.String())
  330. if err != nil {
  331. return nil, &DiscoveryError{Op: "join", Err: err}
  332. }
  333. var urlsmap types.URLsMap
  334. urlsmap, err = types.NewURLsMap(str)
  335. if err != nil {
  336. return nil, err
  337. }
  338. if checkDuplicateURL(urlsmap) {
  339. return nil, fmt.Errorf("discovery cluster %s has duplicate url", urlsmap)
  340. }
  341. if cl, err = membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, urlsmap); err != nil {
  342. return nil, err
  343. }
  344. }
  345. cl.SetStore(st)
  346. cl.SetBackend(be)
  347. id, n, s, w = startNode(cfg, cl, cl.MemberIDs())
  348. cl.SetID(id, cl.ID())
  349. case haveWAL:
  350. if err = fileutil.IsDirWriteable(cfg.MemberDir()); err != nil {
  351. return nil, fmt.Errorf("cannot write to member directory: %v", err)
  352. }
  353. if err = fileutil.IsDirWriteable(cfg.WALDir()); err != nil {
  354. return nil, fmt.Errorf("cannot write to WAL directory: %v", err)
  355. }
  356. if cfg.ShouldDiscover() {
  357. if cfg.Logger != nil {
  358. cfg.Logger.Warn(
  359. "discovery token is ignored since cluster already initialized; valid logs are found",
  360. zap.String("wal-dir", cfg.WALDir()),
  361. )
  362. } else {
  363. plog.Warningf("discovery token ignored since a cluster has already been initialized. Valid log found at %q", cfg.WALDir())
  364. }
  365. }
  366. snapshot, err = ss.Load()
  367. if err != nil && err != snap.ErrNoSnapshot {
  368. return nil, err
  369. }
  370. if snapshot != nil {
  371. if err = st.Recovery(snapshot.Data); err != nil {
  372. if cfg.Logger != nil {
  373. cfg.Logger.Panic("failed to recover from snapshot")
  374. } else {
  375. plog.Panicf("recovered store from snapshot error: %v", err)
  376. }
  377. }
  378. if cfg.Logger != nil {
  379. cfg.Logger.Info(
  380. "recovered v2 store from snapshot",
  381. zap.Uint64("snapshot-index", snapshot.Metadata.Index),
  382. zap.String("snapshot-size", humanize.Bytes(uint64(snapshot.Size()))),
  383. )
  384. } else {
  385. plog.Infof("recovered store from snapshot at index %d", snapshot.Metadata.Index)
  386. }
  387. if be, err = recoverSnapshotBackend(cfg, be, *snapshot); err != nil {
  388. if cfg.Logger != nil {
  389. cfg.Logger.Panic("failed to recover v3 backend from snapshot", zap.Error(err))
  390. } else {
  391. plog.Panicf("recovering backend from snapshot error: %v", err)
  392. }
  393. }
  394. if cfg.Logger != nil {
  395. s1, s2 := be.Size(), be.SizeInUse()
  396. cfg.Logger.Info(
  397. "recovered v3 backend from snapshot",
  398. zap.Int64("backend-size-bytes", s1),
  399. zap.String("backend-size", humanize.Bytes(uint64(s1))),
  400. zap.Int64("backend-size-in-use-bytes", s2),
  401. zap.String("backend-size-in-use", humanize.Bytes(uint64(s2))),
  402. )
  403. }
  404. }
  405. if !cfg.ForceNewCluster {
  406. id, cl, n, s, w = restartNode(cfg, snapshot)
  407. } else {
  408. id, cl, n, s, w = restartAsStandaloneNode(cfg, snapshot)
  409. }
  410. cl.SetStore(st)
  411. cl.SetBackend(be)
  412. cl.Recover(api.UpdateCapability)
  413. if cl.Version() != nil && !cl.Version().LessThan(semver.Version{Major: 3}) && !beExist {
  414. os.RemoveAll(bepath)
  415. return nil, fmt.Errorf("database file (%v) of the backend is missing", bepath)
  416. }
  417. default:
  418. return nil, fmt.Errorf("unsupported bootstrap config")
  419. }
  420. if terr := fileutil.TouchDirAll(cfg.MemberDir()); terr != nil {
  421. return nil, fmt.Errorf("cannot access member directory: %v", terr)
  422. }
  423. sstats := stats.NewServerStats(cfg.Name, id.String())
  424. lstats := stats.NewLeaderStats(id.String())
  425. heartbeat := time.Duration(cfg.TickMs) * time.Millisecond
  426. srv = &EtcdServer{
  427. readych: make(chan struct{}),
  428. Cfg: cfg,
  429. lgMu: new(sync.RWMutex),
  430. lg: cfg.Logger,
  431. errorc: make(chan error, 1),
  432. v2store: st,
  433. snapshotter: ss,
  434. r: *newRaftNode(
  435. raftNodeConfig{
  436. lg: cfg.Logger,
  437. isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) },
  438. Node: n,
  439. heartbeat: heartbeat,
  440. raftStorage: s,
  441. storage: NewStorage(w, ss),
  442. },
  443. ),
  444. id: id,
  445. attributes: membership.Attributes{Name: cfg.Name, ClientURLs: cfg.ClientURLs.StringSlice()},
  446. cluster: cl,
  447. stats: sstats,
  448. lstats: lstats,
  449. SyncTicker: time.NewTicker(500 * time.Millisecond),
  450. peerRt: prt,
  451. reqIDGen: idutil.NewGenerator(uint16(id), time.Now()),
  452. forceVersionC: make(chan struct{}),
  453. AccessController: &AccessController{CORS: cfg.CORS, HostWhitelist: cfg.HostWhitelist},
  454. }
  455. serverID.With(prometheus.Labels{"server_id": id.String()}).Set(1)
  456. srv.applyV2 = &applierV2store{store: srv.v2store, cluster: srv.cluster}
  457. srv.be = be
  458. minTTL := time.Duration((3*cfg.ElectionTicks)/2) * heartbeat
  459. // always recover lessor before kv. When we recover the mvcc.KV it will reattach keys to its leases.
  460. // If we recover mvcc.KV first, it will attach the keys to the wrong lessor before it recovers.
  461. srv.lessor = lease.NewLessor(srv.getLogger(), srv.be, lease.LessorConfig{MinLeaseTTL: int64(math.Ceil(minTTL.Seconds())), CheckpointInterval: cfg.LeaseCheckpointInterval})
  462. srv.kv = mvcc.New(srv.getLogger(), srv.be, srv.lessor, &srv.consistIndex)
  463. if beExist {
  464. kvindex := srv.kv.ConsistentIndex()
  465. // TODO: remove kvindex != 0 checking when we do not expect users to upgrade
  466. // etcd from pre-3.0 release.
  467. if snapshot != nil && kvindex < snapshot.Metadata.Index {
  468. if kvindex != 0 {
  469. return nil, fmt.Errorf("database file (%v index %d) does not match with snapshot (index %d)", bepath, kvindex, snapshot.Metadata.Index)
  470. }
  471. if cfg.Logger != nil {
  472. cfg.Logger.Warn(
  473. "consistent index was never saved",
  474. zap.Uint64("snapshot-index", snapshot.Metadata.Index),
  475. )
  476. } else {
  477. plog.Warningf("consistent index never saved (snapshot index=%d)", snapshot.Metadata.Index)
  478. }
  479. }
  480. }
  481. newSrv := srv // since srv == nil in defer if srv is returned as nil
  482. defer func() {
  483. // closing backend without first closing kv can cause
  484. // resumed compactions to fail with closed tx errors
  485. if err != nil {
  486. newSrv.kv.Close()
  487. }
  488. }()
  489. srv.consistIndex.setConsistentIndex(srv.kv.ConsistentIndex())
  490. tp, err := auth.NewTokenProvider(cfg.Logger, cfg.AuthToken,
  491. func(index uint64) <-chan struct{} {
  492. return srv.applyWait.Wait(index)
  493. },
  494. )
  495. if err != nil {
  496. if cfg.Logger != nil {
  497. cfg.Logger.Warn("failed to create token provider", zap.Error(err))
  498. } else {
  499. plog.Errorf("failed to create token provider: %s", err)
  500. }
  501. return nil, err
  502. }
  503. srv.authStore = auth.NewAuthStore(srv.getLogger(), srv.be, tp, int(cfg.BcryptCost))
  504. if num := cfg.AutoCompactionRetention; num != 0 {
  505. srv.compactor, err = v3compactor.New(cfg.Logger, cfg.AutoCompactionMode, num, srv.kv, srv)
  506. if err != nil {
  507. return nil, err
  508. }
  509. srv.compactor.Run()
  510. }
  511. srv.applyV3Base = srv.newApplierV3Backend()
  512. if err = srv.restoreAlarms(); err != nil {
  513. return nil, err
  514. }
  515. srv.lessor.SetCheckpointer(func(ctx context.Context, cp *pb.LeaseCheckpointRequest) {
  516. srv.raftRequestOnce(ctx, pb.InternalRaftRequest{LeaseCheckpoint: cp})
  517. })
  518. // TODO: move transport initialization near the definition of remote
  519. tr := &rafthttp.Transport{
  520. Logger: cfg.Logger,
  521. TLSInfo: cfg.PeerTLSInfo,
  522. DialTimeout: cfg.peerDialTimeout(),
  523. ID: id,
  524. URLs: cfg.PeerURLs,
  525. ClusterID: cl.ID(),
  526. Raft: srv,
  527. Snapshotter: ss,
  528. ServerStats: sstats,
  529. LeaderStats: lstats,
  530. ErrorC: srv.errorc,
  531. }
  532. if err = tr.Start(); err != nil {
  533. return nil, err
  534. }
  535. // add all remotes into transport
  536. for _, m := range remotes {
  537. if m.ID != id {
  538. tr.AddRemote(m.ID, m.PeerURLs)
  539. }
  540. }
  541. for _, m := range cl.Members() {
  542. if m.ID != id {
  543. tr.AddPeer(m.ID, m.PeerURLs)
  544. }
  545. }
  546. srv.r.transport = tr
  547. return srv, nil
  548. }
  549. func (s *EtcdServer) getLogger() *zap.Logger {
  550. s.lgMu.RLock()
  551. l := s.lg
  552. s.lgMu.RUnlock()
  553. return l
  554. }
  555. func tickToDur(ticks int, tickMs uint) string {
  556. return fmt.Sprintf("%v", time.Duration(ticks)*time.Duration(tickMs)*time.Millisecond)
  557. }
  558. func (s *EtcdServer) adjustTicks() {
  559. lg := s.getLogger()
  560. clusterN := len(s.cluster.Members())
  561. // single-node fresh start, or single-node recovers from snapshot
  562. if clusterN == 1 {
  563. ticks := s.Cfg.ElectionTicks - 1
  564. if lg != nil {
  565. lg.Info(
  566. "started as single-node; fast-forwarding election ticks",
  567. zap.String("local-member-id", s.ID().String()),
  568. zap.Int("forward-ticks", ticks),
  569. zap.String("forward-duration", tickToDur(ticks, s.Cfg.TickMs)),
  570. zap.Int("election-ticks", s.Cfg.ElectionTicks),
  571. zap.String("election-timeout", tickToDur(s.Cfg.ElectionTicks, s.Cfg.TickMs)),
  572. )
  573. } else {
  574. plog.Infof("%s as single-node; fast-forwarding %d ticks (election ticks %d)", s.ID(), ticks, s.Cfg.ElectionTicks)
  575. }
  576. s.r.advanceTicks(ticks)
  577. return
  578. }
  579. if !s.Cfg.InitialElectionTickAdvance {
  580. if lg != nil {
  581. lg.Info("skipping initial election tick advance", zap.Int("election-ticks", s.Cfg.ElectionTicks))
  582. }
  583. return
  584. }
  585. if lg != nil {
  586. lg.Info("starting initial election tick advance", zap.Int("election-ticks", s.Cfg.ElectionTicks))
  587. }
  588. // retry up to "rafthttp.ConnReadTimeout", which is 5-sec
  589. // until peer connection reports; otherwise:
  590. // 1. all connections failed, or
  591. // 2. no active peers, or
  592. // 3. restarted single-node with no snapshot
  593. // then, do nothing, because advancing ticks would have no effect
  594. waitTime := rafthttp.ConnReadTimeout
  595. itv := 50 * time.Millisecond
  596. for i := int64(0); i < int64(waitTime/itv); i++ {
  597. select {
  598. case <-time.After(itv):
  599. case <-s.stopping:
  600. return
  601. }
  602. peerN := s.r.transport.ActivePeers()
  603. if peerN > 1 {
  604. // multi-node received peer connection reports
  605. // adjust ticks, in case slow leader message receive
  606. ticks := s.Cfg.ElectionTicks - 2
  607. if lg != nil {
  608. lg.Info(
  609. "initialized peer connections; fast-forwarding election ticks",
  610. zap.String("local-member-id", s.ID().String()),
  611. zap.Int("forward-ticks", ticks),
  612. zap.String("forward-duration", tickToDur(ticks, s.Cfg.TickMs)),
  613. zap.Int("election-ticks", s.Cfg.ElectionTicks),
  614. zap.String("election-timeout", tickToDur(s.Cfg.ElectionTicks, s.Cfg.TickMs)),
  615. zap.Int("active-remote-members", peerN),
  616. )
  617. } else {
  618. plog.Infof("%s initialized peer connection; fast-forwarding %d ticks (election ticks %d) with %d active peer(s)", s.ID(), ticks, s.Cfg.ElectionTicks, peerN)
  619. }
  620. s.r.advanceTicks(ticks)
  621. return
  622. }
  623. }
  624. }
  625. // Start performs any initialization of the Server necessary for it to
  626. // begin serving requests. It must be called before Do or Process.
  627. // Start must be non-blocking; any long-running server functionality
  628. // should be implemented in goroutines.
  629. func (s *EtcdServer) Start() {
  630. s.start()
  631. s.goAttach(func() { s.adjustTicks() })
  632. s.goAttach(func() { s.publish(s.Cfg.ReqTimeout()) })
  633. s.goAttach(s.purgeFile)
  634. s.goAttach(func() { monitorFileDescriptor(s.getLogger(), s.stopping) })
  635. s.goAttach(s.monitorVersions)
  636. s.goAttach(s.linearizableReadLoop)
  637. s.goAttach(s.monitorKVHash)
  638. }
  639. // start prepares and starts server in a new goroutine. It is no longer safe to
  640. // modify a server's fields after it has been sent to Start.
  641. // This function is just used for testing.
  642. func (s *EtcdServer) start() {
  643. lg := s.getLogger()
  644. if s.Cfg.SnapshotCount == 0 {
  645. if lg != nil {
  646. lg.Info(
  647. "updating snapshot-count to default",
  648. zap.Uint64("given-snapshot-count", s.Cfg.SnapshotCount),
  649. zap.Uint64("updated-snapshot-count", DefaultSnapshotCount),
  650. )
  651. } else {
  652. plog.Infof("set snapshot count to default %d", DefaultSnapshotCount)
  653. }
  654. s.Cfg.SnapshotCount = DefaultSnapshotCount
  655. }
  656. if s.Cfg.SnapshotCatchUpEntries == 0 {
  657. if lg != nil {
  658. lg.Info(
  659. "updating snapshot catch-up entries to default",
  660. zap.Uint64("given-snapshot-catchup-entries", s.Cfg.SnapshotCatchUpEntries),
  661. zap.Uint64("updated-snapshot-catchup-entries", DefaultSnapshotCatchUpEntries),
  662. )
  663. }
  664. s.Cfg.SnapshotCatchUpEntries = DefaultSnapshotCatchUpEntries
  665. }
  666. s.w = wait.New()
  667. s.applyWait = wait.NewTimeList()
  668. s.done = make(chan struct{})
  669. s.stop = make(chan struct{})
  670. s.stopping = make(chan struct{})
  671. s.ctx, s.cancel = context.WithCancel(context.Background())
  672. s.readwaitc = make(chan struct{}, 1)
  673. s.readNotifier = newNotifier()
  674. s.leaderChanged = make(chan struct{})
  675. if s.ClusterVersion() != nil {
  676. if lg != nil {
  677. lg.Info(
  678. "starting etcd server",
  679. zap.String("local-member-id", s.ID().String()),
  680. zap.String("local-server-version", version.Version),
  681. zap.String("cluster-id", s.Cluster().ID().String()),
  682. zap.String("cluster-version", version.Cluster(s.ClusterVersion().String())),
  683. )
  684. } else {
  685. plog.Infof("starting server... [version: %v, cluster version: %v]", version.Version, version.Cluster(s.ClusterVersion().String()))
  686. }
  687. membership.ClusterVersionMetrics.With(prometheus.Labels{"cluster_version": s.ClusterVersion().String()}).Set(1)
  688. } else {
  689. if lg != nil {
  690. lg.Info(
  691. "starting etcd server",
  692. zap.String("local-member-id", s.ID().String()),
  693. zap.String("local-server-version", version.Version),
  694. zap.String("cluster-version", "to_be_decided"),
  695. )
  696. } else {
  697. plog.Infof("starting server... [version: %v, cluster version: to_be_decided]", version.Version)
  698. }
  699. }
  700. // TODO: if this is an empty log, writes all peer infos
  701. // into the first entry
  702. go s.run()
  703. }
  704. func (s *EtcdServer) purgeFile() {
  705. var dberrc, serrc, werrc <-chan error
  706. if s.Cfg.MaxSnapFiles > 0 {
  707. dberrc = fileutil.PurgeFile(s.getLogger(), s.Cfg.SnapDir(), "snap.db", s.Cfg.MaxSnapFiles, purgeFileInterval, s.done)
  708. serrc = fileutil.PurgeFile(s.getLogger(), s.Cfg.SnapDir(), "snap", s.Cfg.MaxSnapFiles, purgeFileInterval, s.done)
  709. }
  710. if s.Cfg.MaxWALFiles > 0 {
  711. werrc = fileutil.PurgeFile(s.getLogger(), s.Cfg.WALDir(), "wal", s.Cfg.MaxWALFiles, purgeFileInterval, s.done)
  712. }
  713. lg := s.getLogger()
  714. select {
  715. case e := <-dberrc:
  716. if lg != nil {
  717. lg.Fatal("failed to purge snap db file", zap.Error(e))
  718. } else {
  719. plog.Fatalf("failed to purge snap db file %v", e)
  720. }
  721. case e := <-serrc:
  722. if lg != nil {
  723. lg.Fatal("failed to purge snap file", zap.Error(e))
  724. } else {
  725. plog.Fatalf("failed to purge snap file %v", e)
  726. }
  727. case e := <-werrc:
  728. if lg != nil {
  729. lg.Fatal("failed to purge wal file", zap.Error(e))
  730. } else {
  731. plog.Fatalf("failed to purge wal file %v", e)
  732. }
  733. case <-s.stopping:
  734. return
  735. }
  736. }
  737. func (s *EtcdServer) Cluster() api.Cluster { return s.cluster }
  738. func (s *EtcdServer) ApplyWait() <-chan struct{} { return s.applyWait.Wait(s.getCommittedIndex()) }
  739. type ServerPeer interface {
  740. ServerV2
  741. RaftHandler() http.Handler
  742. LeaseHandler() http.Handler
  743. }
  744. func (s *EtcdServer) LeaseHandler() http.Handler {
  745. if s.lessor == nil {
  746. return nil
  747. }
  748. return leasehttp.NewHandler(s.lessor, s.ApplyWait)
  749. }
  750. func (s *EtcdServer) RaftHandler() http.Handler { return s.r.transport.Handler() }
  751. // Process takes a raft message and applies it to the server's raft state
  752. // machine, respecting any timeout of the given context.
  753. func (s *EtcdServer) Process(ctx context.Context, m raftpb.Message) error {
  754. if s.cluster.IsIDRemoved(types.ID(m.From)) {
  755. if lg := s.getLogger(); lg != nil {
  756. lg.Warn(
  757. "rejected Raft message from removed member",
  758. zap.String("local-member-id", s.ID().String()),
  759. zap.String("removed-member-id", types.ID(m.From).String()),
  760. )
  761. } else {
  762. plog.Warningf("reject message from removed member %s", types.ID(m.From).String())
  763. }
  764. return httptypes.NewHTTPError(http.StatusForbidden, "cannot process message from removed member")
  765. }
  766. if m.Type == raftpb.MsgApp {
  767. s.stats.RecvAppendReq(types.ID(m.From).String(), m.Size())
  768. }
  769. return s.r.Step(ctx, m)
  770. }
  771. func (s *EtcdServer) IsIDRemoved(id uint64) bool { return s.cluster.IsIDRemoved(types.ID(id)) }
  772. func (s *EtcdServer) ReportUnreachable(id uint64) { s.r.ReportUnreachable(id) }
  773. // ReportSnapshot reports snapshot sent status to the raft state machine,
  774. // and clears the used snapshot from the snapshot store.
  775. func (s *EtcdServer) ReportSnapshot(id uint64, status raft.SnapshotStatus) {
  776. s.r.ReportSnapshot(id, status)
  777. }
  778. type etcdProgress struct {
  779. confState raftpb.ConfState
  780. snapi uint64
  781. appliedt uint64
  782. appliedi uint64
  783. }
  784. // raftReadyHandler contains a set of EtcdServer operations to be called by raftNode,
  785. // and helps decouple state machine logic from Raft algorithms.
  786. // TODO: add a state machine interface to apply the commit entries and do snapshot/recover
  787. type raftReadyHandler struct {
  788. getLead func() (lead uint64)
  789. updateLead func(lead uint64)
  790. updateLeadership func(newLeader bool)
  791. updateCommittedIndex func(uint64)
  792. }
  793. func (s *EtcdServer) run() {
  794. lg := s.getLogger()
  795. sn, err := s.r.raftStorage.Snapshot()
  796. if err != nil {
  797. if lg != nil {
  798. lg.Panic("failed to get snapshot from Raft storage", zap.Error(err))
  799. } else {
  800. plog.Panicf("get snapshot from raft storage error: %v", err)
  801. }
  802. }
  803. // asynchronously accept apply packets, dispatch progress in-order
  804. sched := schedule.NewFIFOScheduler()
  805. var (
  806. smu sync.RWMutex
  807. syncC <-chan time.Time
  808. )
  809. setSyncC := func(ch <-chan time.Time) {
  810. smu.Lock()
  811. syncC = ch
  812. smu.Unlock()
  813. }
  814. getSyncC := func() (ch <-chan time.Time) {
  815. smu.RLock()
  816. ch = syncC
  817. smu.RUnlock()
  818. return
  819. }
  820. rh := &raftReadyHandler{
  821. getLead: func() (lead uint64) { return s.getLead() },
  822. updateLead: func(lead uint64) { s.setLead(lead) },
  823. updateLeadership: func(newLeader bool) {
  824. if !s.isLeader() {
  825. if s.lessor != nil {
  826. s.lessor.Demote()
  827. }
  828. if s.compactor != nil {
  829. s.compactor.Pause()
  830. }
  831. setSyncC(nil)
  832. } else {
  833. if newLeader {
  834. t := time.Now()
  835. s.leadTimeMu.Lock()
  836. s.leadElectedTime = t
  837. s.leadTimeMu.Unlock()
  838. }
  839. setSyncC(s.SyncTicker.C)
  840. if s.compactor != nil {
  841. s.compactor.Resume()
  842. }
  843. }
  844. if newLeader {
  845. s.leaderChangedMu.Lock()
  846. lc := s.leaderChanged
  847. s.leaderChanged = make(chan struct{})
  848. close(lc)
  849. s.leaderChangedMu.Unlock()
  850. }
  851. // TODO: remove the nil checking
  852. // current test utility does not provide the stats
  853. if s.stats != nil {
  854. s.stats.BecomeLeader()
  855. }
  856. },
  857. updateCommittedIndex: func(ci uint64) {
  858. cci := s.getCommittedIndex()
  859. if ci > cci {
  860. s.setCommittedIndex(ci)
  861. }
  862. },
  863. }
  864. s.r.start(rh)
  865. ep := etcdProgress{
  866. confState: sn.Metadata.ConfState,
  867. snapi: sn.Metadata.Index,
  868. appliedt: sn.Metadata.Term,
  869. appliedi: sn.Metadata.Index,
  870. }
  871. defer func() {
  872. s.wgMu.Lock() // block concurrent waitgroup adds in goAttach while stopping
  873. close(s.stopping)
  874. s.wgMu.Unlock()
  875. s.cancel()
  876. sched.Stop()
  877. // wait for gouroutines before closing raft so wal stays open
  878. s.wg.Wait()
  879. s.SyncTicker.Stop()
  880. // must stop raft after scheduler-- etcdserver can leak rafthttp pipelines
  881. // by adding a peer after raft stops the transport
  882. s.r.stop()
  883. // kv, lessor and backend can be nil if running without v3 enabled
  884. // or running unit tests.
  885. if s.lessor != nil {
  886. s.lessor.Stop()
  887. }
  888. if s.kv != nil {
  889. s.kv.Close()
  890. }
  891. if s.authStore != nil {
  892. s.authStore.Close()
  893. }
  894. if s.be != nil {
  895. s.be.Close()
  896. }
  897. if s.compactor != nil {
  898. s.compactor.Stop()
  899. }
  900. close(s.done)
  901. }()
  902. var expiredLeaseC <-chan []*lease.Lease
  903. if s.lessor != nil {
  904. expiredLeaseC = s.lessor.ExpiredLeasesC()
  905. }
  906. for {
  907. select {
  908. case ap := <-s.r.apply():
  909. f := func(context.Context) { s.applyAll(&ep, &ap) }
  910. sched.Schedule(f)
  911. case leases := <-expiredLeaseC:
  912. s.goAttach(func() {
  913. // Increases throughput of expired leases deletion process through parallelization
  914. c := make(chan struct{}, maxPendingRevokes)
  915. for _, lease := range leases {
  916. select {
  917. case c <- struct{}{}:
  918. case <-s.stopping:
  919. return
  920. }
  921. lid := lease.ID
  922. s.goAttach(func() {
  923. ctx := s.authStore.WithRoot(s.ctx)
  924. _, lerr := s.LeaseRevoke(ctx, &pb.LeaseRevokeRequest{ID: int64(lid)})
  925. if lerr == nil {
  926. leaseExpired.Inc()
  927. } else {
  928. if lg != nil {
  929. lg.Warn(
  930. "failed to revoke lease",
  931. zap.String("lease-id", fmt.Sprintf("%016x", lid)),
  932. zap.Error(lerr),
  933. )
  934. } else {
  935. plog.Warningf("failed to revoke %016x (%q)", lid, lerr.Error())
  936. }
  937. }
  938. <-c
  939. })
  940. }
  941. })
  942. case err := <-s.errorc:
  943. if lg != nil {
  944. lg.Warn("server error", zap.Error(err))
  945. lg.Warn("data-dir used by this member must be removed")
  946. } else {
  947. plog.Errorf("%s", err)
  948. plog.Infof("the data-dir used by this member must be removed.")
  949. }
  950. return
  951. case <-getSyncC():
  952. if s.v2store.HasTTLKeys() {
  953. s.sync(s.Cfg.ReqTimeout())
  954. }
  955. case <-s.stop:
  956. return
  957. }
  958. }
  959. }
  960. func (s *EtcdServer) applyAll(ep *etcdProgress, apply *apply) {
  961. s.applySnapshot(ep, apply)
  962. s.applyEntries(ep, apply)
  963. proposalsApplied.Set(float64(ep.appliedi))
  964. s.applyWait.Trigger(ep.appliedi)
  965. // wait for the raft routine to finish the disk writes before triggering a
  966. // snapshot. or applied index might be greater than the last index in raft
  967. // storage, since the raft routine might be slower than apply routine.
  968. <-apply.notifyc
  969. s.triggerSnapshot(ep)
  970. select {
  971. // snapshot requested via send()
  972. case m := <-s.r.msgSnapC:
  973. merged := s.createMergedSnapshotMessage(m, ep.appliedt, ep.appliedi, ep.confState)
  974. s.sendMergedSnap(merged)
  975. default:
  976. }
  977. }
  978. func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) {
  979. if raft.IsEmptySnap(apply.snapshot) {
  980. return
  981. }
  982. lg := s.getLogger()
  983. if lg != nil {
  984. lg.Info(
  985. "applying snapshot",
  986. zap.Uint64("current-snapshot-index", ep.snapi),
  987. zap.Uint64("current-applied-index", ep.appliedi),
  988. zap.Uint64("incoming-leader-snapshot-index", apply.snapshot.Metadata.Index),
  989. zap.Uint64("incoming-leader-snapshot-term", apply.snapshot.Metadata.Term),
  990. )
  991. } else {
  992. plog.Infof("applying snapshot at index %d...", ep.snapi)
  993. }
  994. defer func() {
  995. if lg != nil {
  996. lg.Info(
  997. "applied snapshot",
  998. zap.Uint64("current-snapshot-index", ep.snapi),
  999. zap.Uint64("current-applied-index", ep.appliedi),
  1000. zap.Uint64("incoming-leader-snapshot-index", apply.snapshot.Metadata.Index),
  1001. zap.Uint64("incoming-leader-snapshot-term", apply.snapshot.Metadata.Term),
  1002. )
  1003. } else {
  1004. plog.Infof("finished applying incoming snapshot at index %d", ep.snapi)
  1005. }
  1006. }()
  1007. if apply.snapshot.Metadata.Index <= ep.appliedi {
  1008. if lg != nil {
  1009. lg.Panic(
  1010. "unexpected leader snapshot from outdated index",
  1011. zap.Uint64("current-snapshot-index", ep.snapi),
  1012. zap.Uint64("current-applied-index", ep.appliedi),
  1013. zap.Uint64("incoming-leader-snapshot-index", apply.snapshot.Metadata.Index),
  1014. zap.Uint64("incoming-leader-snapshot-term", apply.snapshot.Metadata.Term),
  1015. )
  1016. } else {
  1017. plog.Panicf("snapshot index [%d] should > appliedi[%d] + 1",
  1018. apply.snapshot.Metadata.Index, ep.appliedi)
  1019. }
  1020. }
  1021. // wait for raftNode to persist snapshot onto the disk
  1022. <-apply.notifyc
  1023. newbe, err := openSnapshotBackend(s.Cfg, s.snapshotter, apply.snapshot)
  1024. if err != nil {
  1025. if lg != nil {
  1026. lg.Panic("failed to open snapshot backend", zap.Error(err))
  1027. } else {
  1028. plog.Panic(err)
  1029. }
  1030. }
  1031. // always recover lessor before kv. When we recover the mvcc.KV it will reattach keys to its leases.
  1032. // If we recover mvcc.KV first, it will attach the keys to the wrong lessor before it recovers.
  1033. if s.lessor != nil {
  1034. if lg != nil {
  1035. lg.Info("restoring lease store")
  1036. } else {
  1037. plog.Info("recovering lessor...")
  1038. }
  1039. s.lessor.Recover(newbe, func() lease.TxnDelete { return s.kv.Write() })
  1040. if lg != nil {
  1041. lg.Info("restored lease store")
  1042. } else {
  1043. plog.Info("finished recovering lessor")
  1044. }
  1045. }
  1046. if lg != nil {
  1047. lg.Info("restoring mvcc store")
  1048. } else {
  1049. plog.Info("restoring mvcc store...")
  1050. }
  1051. if err := s.kv.Restore(newbe); err != nil {
  1052. if lg != nil {
  1053. lg.Panic("failed to restore mvcc store", zap.Error(err))
  1054. } else {
  1055. plog.Panicf("restore KV error: %v", err)
  1056. }
  1057. }
  1058. s.consistIndex.setConsistentIndex(s.kv.ConsistentIndex())
  1059. if lg != nil {
  1060. lg.Info("restored mvcc store")
  1061. } else {
  1062. plog.Info("finished restoring mvcc store")
  1063. }
  1064. // Closing old backend might block until all the txns
  1065. // on the backend are finished.
  1066. // We do not want to wait on closing the old backend.
  1067. s.bemu.Lock()
  1068. oldbe := s.be
  1069. go func() {
  1070. if lg != nil {
  1071. lg.Info("closing old backend file")
  1072. } else {
  1073. plog.Info("closing old backend...")
  1074. }
  1075. defer func() {
  1076. if lg != nil {
  1077. lg.Info("closed old backend file")
  1078. } else {
  1079. plog.Info("finished closing old backend")
  1080. }
  1081. }()
  1082. if err := oldbe.Close(); err != nil {
  1083. if lg != nil {
  1084. lg.Panic("failed to close old backend", zap.Error(err))
  1085. } else {
  1086. plog.Panicf("close backend error: %v", err)
  1087. }
  1088. }
  1089. }()
  1090. s.be = newbe
  1091. s.bemu.Unlock()
  1092. if lg != nil {
  1093. lg.Info("restoring alarm store")
  1094. } else {
  1095. plog.Info("recovering alarms...")
  1096. }
  1097. if err := s.restoreAlarms(); err != nil {
  1098. if lg != nil {
  1099. lg.Panic("failed to restore alarm store", zap.Error(err))
  1100. } else {
  1101. plog.Panicf("restore alarms error: %v", err)
  1102. }
  1103. }
  1104. if lg != nil {
  1105. lg.Info("restored alarm store")
  1106. } else {
  1107. plog.Info("finished recovering alarms")
  1108. }
  1109. if s.authStore != nil {
  1110. if lg != nil {
  1111. lg.Info("restoring auth store")
  1112. } else {
  1113. plog.Info("recovering auth store...")
  1114. }
  1115. s.authStore.Recover(newbe)
  1116. if lg != nil {
  1117. lg.Info("restored auth store")
  1118. } else {
  1119. plog.Info("finished recovering auth store")
  1120. }
  1121. }
  1122. if lg != nil {
  1123. lg.Info("restoring v2 store")
  1124. } else {
  1125. plog.Info("recovering store v2...")
  1126. }
  1127. if err := s.v2store.Recovery(apply.snapshot.Data); err != nil {
  1128. if lg != nil {
  1129. lg.Panic("failed to restore v2 store", zap.Error(err))
  1130. } else {
  1131. plog.Panicf("recovery store error: %v", err)
  1132. }
  1133. }
  1134. if lg != nil {
  1135. lg.Info("restored v2 store")
  1136. } else {
  1137. plog.Info("finished recovering store v2")
  1138. }
  1139. s.cluster.SetBackend(s.be)
  1140. if lg != nil {
  1141. lg.Info("restoring cluster configuration")
  1142. } else {
  1143. plog.Info("recovering cluster configuration...")
  1144. }
  1145. s.cluster.Recover(api.UpdateCapability)
  1146. if lg != nil {
  1147. lg.Info("restored cluster configuration")
  1148. lg.Info("removing old peers from network")
  1149. } else {
  1150. plog.Info("finished recovering cluster configuration")
  1151. plog.Info("removing old peers from network...")
  1152. }
  1153. // recover raft transport
  1154. s.r.transport.RemoveAllPeers()
  1155. if lg != nil {
  1156. lg.Info("removed old peers from network")
  1157. lg.Info("adding peers from new cluster configuration")
  1158. } else {
  1159. plog.Info("finished removing old peers from network")
  1160. plog.Info("adding peers from new cluster configuration into network...")
  1161. }
  1162. for _, m := range s.cluster.Members() {
  1163. if m.ID == s.ID() {
  1164. continue
  1165. }
  1166. s.r.transport.AddPeer(m.ID, m.PeerURLs)
  1167. }
  1168. if lg != nil {
  1169. lg.Info("added peers from new cluster configuration")
  1170. } else {
  1171. plog.Info("finished adding peers from new cluster configuration into network...")
  1172. }
  1173. ep.appliedt = apply.snapshot.Metadata.Term
  1174. ep.appliedi = apply.snapshot.Metadata.Index
  1175. ep.snapi = ep.appliedi
  1176. ep.confState = apply.snapshot.Metadata.ConfState
  1177. }
  1178. func (s *EtcdServer) applyEntries(ep *etcdProgress, apply *apply) {
  1179. if len(apply.entries) == 0 {
  1180. return
  1181. }
  1182. firsti := apply.entries[0].Index
  1183. if firsti > ep.appliedi+1 {
  1184. if lg := s.getLogger(); lg != nil {
  1185. lg.Panic(
  1186. "unexpected committed entry index",
  1187. zap.Uint64("current-applied-index", ep.appliedi),
  1188. zap.Uint64("first-committed-entry-index", firsti),
  1189. )
  1190. } else {
  1191. plog.Panicf("first index of committed entry[%d] should <= appliedi[%d] + 1", firsti, ep.appliedi)
  1192. }
  1193. }
  1194. var ents []raftpb.Entry
  1195. if ep.appliedi+1-firsti < uint64(len(apply.entries)) {
  1196. ents = apply.entries[ep.appliedi+1-firsti:]
  1197. }
  1198. if len(ents) == 0 {
  1199. return
  1200. }
  1201. var shouldstop bool
  1202. if ep.appliedt, ep.appliedi, shouldstop = s.apply(ents, &ep.confState); shouldstop {
  1203. go s.stopWithDelay(10*100*time.Millisecond, fmt.Errorf("the member has been permanently removed from the cluster"))
  1204. }
  1205. }
  1206. func (s *EtcdServer) triggerSnapshot(ep *etcdProgress) {
  1207. if ep.appliedi-ep.snapi <= s.Cfg.SnapshotCount {
  1208. return
  1209. }
  1210. if lg := s.getLogger(); lg != nil {
  1211. lg.Info(
  1212. "triggering snapshot",
  1213. zap.String("local-member-id", s.ID().String()),
  1214. zap.Uint64("local-member-applied-index", ep.appliedi),
  1215. zap.Uint64("local-member-snapshot-index", ep.snapi),
  1216. zap.Uint64("local-member-snapshot-count", s.Cfg.SnapshotCount),
  1217. )
  1218. } else {
  1219. plog.Infof("start to snapshot (applied: %d, lastsnap: %d)", ep.appliedi, ep.snapi)
  1220. }
  1221. s.snapshot(ep.appliedi, ep.confState)
  1222. ep.snapi = ep.appliedi
  1223. }
  1224. func (s *EtcdServer) hasMultipleVotingMembers() bool {
  1225. return s.cluster != nil && len(s.cluster.VotingMemberIDs()) > 1
  1226. }
  1227. func (s *EtcdServer) isLeader() bool {
  1228. return uint64(s.ID()) == s.Lead()
  1229. }
  1230. // MoveLeader transfers the leader to the given transferee.
  1231. func (s *EtcdServer) MoveLeader(ctx context.Context, lead, transferee uint64) error {
  1232. if !s.cluster.IsMemberExist(types.ID(transferee)) || s.cluster.Member(types.ID(transferee)).IsLearner {
  1233. return ErrBadLeaderTransferee
  1234. }
  1235. now := time.Now()
  1236. interval := time.Duration(s.Cfg.TickMs) * time.Millisecond
  1237. if lg := s.getLogger(); lg != nil {
  1238. lg.Info(
  1239. "leadership transfer starting",
  1240. zap.String("local-member-id", s.ID().String()),
  1241. zap.String("current-leader-member-id", types.ID(lead).String()),
  1242. zap.String("transferee-member-id", types.ID(transferee).String()),
  1243. )
  1244. } else {
  1245. plog.Infof("%s starts leadership transfer from %s to %s", s.ID(), types.ID(lead), types.ID(transferee))
  1246. }
  1247. s.r.TransferLeadership(ctx, lead, transferee)
  1248. for s.Lead() != transferee {
  1249. select {
  1250. case <-ctx.Done(): // time out
  1251. return ErrTimeoutLeaderTransfer
  1252. case <-time.After(interval):
  1253. }
  1254. }
  1255. // TODO: drain all requests, or drop all messages to the old leader
  1256. if lg := s.getLogger(); lg != nil {
  1257. lg.Info(
  1258. "leadership transfer finished",
  1259. zap.String("local-member-id", s.ID().String()),
  1260. zap.String("old-leader-member-id", types.ID(lead).String()),
  1261. zap.String("new-leader-member-id", types.ID(transferee).String()),
  1262. zap.Duration("took", time.Since(now)),
  1263. )
  1264. } else {
  1265. plog.Infof("%s finished leadership transfer from %s to %s (took %v)", s.ID(), types.ID(lead), types.ID(transferee), time.Since(now))
  1266. }
  1267. return nil
  1268. }
  1269. // TransferLeadership transfers the leader to the chosen transferee.
  1270. func (s *EtcdServer) TransferLeadership() error {
  1271. if !s.isLeader() {
  1272. if lg := s.getLogger(); lg != nil {
  1273. lg.Info(
  1274. "skipped leadership transfer; local server is not leader",
  1275. zap.String("local-member-id", s.ID().String()),
  1276. zap.String("current-leader-member-id", types.ID(s.Lead()).String()),
  1277. )
  1278. } else {
  1279. plog.Printf("skipped leadership transfer for stopping non-leader member")
  1280. }
  1281. return nil
  1282. }
  1283. if !s.hasMultipleVotingMembers() {
  1284. if lg := s.getLogger(); lg != nil {
  1285. lg.Info(
  1286. "skipped leadership transfer for single voting member cluster",
  1287. zap.String("local-member-id", s.ID().String()),
  1288. zap.String("current-leader-member-id", types.ID(s.Lead()).String()),
  1289. )
  1290. } else {
  1291. plog.Printf("skipped leadership transfer for single voting member cluster")
  1292. }
  1293. return nil
  1294. }
  1295. transferee, ok := longestConnected(s.r.transport, s.cluster.VotingMemberIDs())
  1296. if !ok {
  1297. return ErrUnhealthy
  1298. }
  1299. tm := s.Cfg.ReqTimeout()
  1300. ctx, cancel := context.WithTimeout(s.ctx, tm)
  1301. err := s.MoveLeader(ctx, s.Lead(), uint64(transferee))
  1302. cancel()
  1303. return err
  1304. }
  1305. // HardStop stops the server without coordination with other members in the cluster.
  1306. func (s *EtcdServer) HardStop() {
  1307. select {
  1308. case s.stop <- struct{}{}:
  1309. case <-s.done:
  1310. return
  1311. }
  1312. <-s.done
  1313. }
  1314. // Stop stops the server gracefully, and shuts down the running goroutine.
  1315. // Stop should be called after a Start(s), otherwise it will block forever.
  1316. // When stopping leader, Stop transfers its leadership to one of its peers
  1317. // before stopping the server.
  1318. // Stop terminates the Server and performs any necessary finalization.
  1319. // Do and Process cannot be called after Stop has been invoked.
  1320. func (s *EtcdServer) Stop() {
  1321. if err := s.TransferLeadership(); err != nil {
  1322. if lg := s.getLogger(); lg != nil {
  1323. lg.Warn("leadership transfer failed", zap.String("local-member-id", s.ID().String()), zap.Error(err))
  1324. } else {
  1325. plog.Warningf("%s failed to transfer leadership (%v)", s.ID(), err)
  1326. }
  1327. }
  1328. s.HardStop()
  1329. }
  1330. // ReadyNotify returns a channel that will be closed when the server
  1331. // is ready to serve client requests
  1332. func (s *EtcdServer) ReadyNotify() <-chan struct{} { return s.readych }
  1333. func (s *EtcdServer) stopWithDelay(d time.Duration, err error) {
  1334. select {
  1335. case <-time.After(d):
  1336. case <-s.done:
  1337. }
  1338. select {
  1339. case s.errorc <- err:
  1340. default:
  1341. }
  1342. }
  1343. // StopNotify returns a channel that receives a empty struct
  1344. // when the server is stopped.
  1345. func (s *EtcdServer) StopNotify() <-chan struct{} { return s.done }
  1346. func (s *EtcdServer) SelfStats() []byte { return s.stats.JSON() }
  1347. func (s *EtcdServer) LeaderStats() []byte {
  1348. lead := s.getLead()
  1349. if lead != uint64(s.id) {
  1350. return nil
  1351. }
  1352. return s.lstats.JSON()
  1353. }
  1354. func (s *EtcdServer) StoreStats() []byte { return s.v2store.JsonStats() }
  1355. func (s *EtcdServer) checkMembershipOperationPermission(ctx context.Context) error {
  1356. if s.authStore == nil {
  1357. // In the context of ordinary etcd process, s.authStore will never be nil.
  1358. // This branch is for handling cases in server_test.go
  1359. return nil
  1360. }
  1361. // Note that this permission check is done in the API layer,
  1362. // so TOCTOU problem can be caused potentially in a schedule like this:
  1363. // update membership with user A -> revoke root role of A -> apply membership change
  1364. // in the state machine layer
  1365. // However, both of membership change and role management requires the root privilege.
  1366. // So careful operation by admins can prevent the problem.
  1367. authInfo, err := s.AuthInfoFromCtx(ctx)
  1368. if err != nil {
  1369. return err
  1370. }
  1371. return s.AuthStore().IsAdminPermitted(authInfo)
  1372. }
  1373. func (s *EtcdServer) AddMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) {
  1374. if err := s.checkMembershipOperationPermission(ctx); err != nil {
  1375. return nil, err
  1376. }
  1377. // TODO: might switch to less strict check when adding raft learner
  1378. if s.Cfg.StrictReconfigCheck {
  1379. // by default StrictReconfigCheck is enabled; reject new members if unhealthy
  1380. if !s.cluster.IsReadyToAddNewMember() {
  1381. if lg := s.getLogger(); lg != nil {
  1382. lg.Warn(
  1383. "rejecting member add request; not enough healthy members",
  1384. zap.String("local-member-id", s.ID().String()),
  1385. zap.String("requested-member-add", fmt.Sprintf("%+v", memb)),
  1386. zap.Error(ErrNotEnoughStartedMembers),
  1387. )
  1388. } else {
  1389. plog.Warningf("not enough started members, rejecting member add %+v", memb)
  1390. }
  1391. return nil, ErrNotEnoughStartedMembers
  1392. }
  1393. if !isConnectedFullySince(s.r.transport, time.Now().Add(-HealthInterval), s.ID(), s.cluster.Members()) {
  1394. if lg := s.getLogger(); lg != nil {
  1395. lg.Warn(
  1396. "rejecting member add request; local member has not been connected to all peers, reconfigure breaks active quorum",
  1397. zap.String("local-member-id", s.ID().String()),
  1398. zap.String("requested-member-add", fmt.Sprintf("%+v", memb)),
  1399. zap.Error(ErrUnhealthy),
  1400. )
  1401. } else {
  1402. plog.Warningf("not healthy for reconfigure, rejecting member add %+v", memb)
  1403. }
  1404. return nil, ErrUnhealthy
  1405. }
  1406. }
  1407. // TODO: move Member to protobuf type
  1408. b, err := json.Marshal(memb)
  1409. if err != nil {
  1410. return nil, err
  1411. }
  1412. cc := raftpb.ConfChange{
  1413. Type: raftpb.ConfChangeAddNode,
  1414. NodeID: uint64(memb.ID),
  1415. Context: b,
  1416. }
  1417. if memb.IsLearner {
  1418. cc.Type = raftpb.ConfChangeAddLearnerNode
  1419. }
  1420. return s.configure(ctx, cc)
  1421. }
  1422. func (s *EtcdServer) RemoveMember(ctx context.Context, id uint64) ([]*membership.Member, error) {
  1423. if err := s.checkMembershipOperationPermission(ctx); err != nil {
  1424. return nil, err
  1425. }
  1426. // by default StrictReconfigCheck is enabled; reject removal if leads to quorum loss
  1427. if err := s.mayRemoveMember(types.ID(id)); err != nil {
  1428. return nil, err
  1429. }
  1430. cc := raftpb.ConfChange{
  1431. Type: raftpb.ConfChangeRemoveNode,
  1432. NodeID: id,
  1433. }
  1434. return s.configure(ctx, cc)
  1435. }
  1436. // PromoteMember promotes a learner node to a voting node.
  1437. func (s *EtcdServer) PromoteMember(ctx context.Context, id uint64) ([]*membership.Member, error) {
  1438. if err := s.checkMembershipOperationPermission(ctx); err != nil {
  1439. return nil, err
  1440. }
  1441. // check if we can promote this learner
  1442. if err := s.mayPromoteMember(types.ID(id)); err != nil {
  1443. return nil, err
  1444. }
  1445. // build the context for the promote confChange. mark IsLearner to false and IsPromote to true.
  1446. promoteChangeContext := membership.ConfigChangeContext{
  1447. Member: membership.Member{
  1448. ID: types.ID(id),
  1449. },
  1450. IsPromote: true,
  1451. }
  1452. b, err := json.Marshal(promoteChangeContext)
  1453. if err != nil {
  1454. return nil, err
  1455. }
  1456. cc := raftpb.ConfChange{
  1457. Type: raftpb.ConfChangeAddNode,
  1458. NodeID: id,
  1459. Context: b,
  1460. }
  1461. return s.configure(ctx, cc)
  1462. }
  1463. func (s *EtcdServer) mayPromoteMember(id types.ID) error {
  1464. if !s.Cfg.StrictReconfigCheck {
  1465. return nil
  1466. }
  1467. // TODO add more checks whether the member can be promoted.
  1468. return nil
  1469. }
  1470. func (s *EtcdServer) mayRemoveMember(id types.ID) error {
  1471. if !s.Cfg.StrictReconfigCheck {
  1472. return nil
  1473. }
  1474. if !s.cluster.IsReadyToRemoveMember(uint64(id)) {
  1475. if lg := s.getLogger(); lg != nil {
  1476. lg.Warn(
  1477. "rejecting member remove request; not enough healthy members",
  1478. zap.String("local-member-id", s.ID().String()),
  1479. zap.String("requested-member-remove-id", id.String()),
  1480. zap.Error(ErrNotEnoughStartedMembers),
  1481. )
  1482. } else {
  1483. plog.Warningf("not enough started members, rejecting remove member %s", id)
  1484. }
  1485. return ErrNotEnoughStartedMembers
  1486. }
  1487. // downed member is safe to remove since it's not part of the active quorum
  1488. if t := s.r.transport.ActiveSince(id); id != s.ID() && t.IsZero() {
  1489. return nil
  1490. }
  1491. // protect quorum if some members are down
  1492. m := s.cluster.Members()
  1493. active := numConnectedSince(s.r.transport, time.Now().Add(-HealthInterval), s.ID(), m)
  1494. if (active - 1) < 1+((len(m)-1)/2) {
  1495. if lg := s.getLogger(); lg != nil {
  1496. lg.Warn(
  1497. "rejecting member remove request; local member has not been connected to all peers, reconfigure breaks active quorum",
  1498. zap.String("local-member-id", s.ID().String()),
  1499. zap.String("requested-member-remove", id.String()),
  1500. zap.Int("active-peers", active),
  1501. zap.Error(ErrUnhealthy),
  1502. )
  1503. } else {
  1504. plog.Warningf("reconfigure breaks active quorum, rejecting remove member %s", id)
  1505. }
  1506. return ErrUnhealthy
  1507. }
  1508. return nil
  1509. }
  1510. func (s *EtcdServer) UpdateMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) {
  1511. b, merr := json.Marshal(memb)
  1512. if merr != nil {
  1513. return nil, merr
  1514. }
  1515. if err := s.checkMembershipOperationPermission(ctx); err != nil {
  1516. return nil, err
  1517. }
  1518. cc := raftpb.ConfChange{
  1519. Type: raftpb.ConfChangeUpdateNode,
  1520. NodeID: uint64(memb.ID),
  1521. Context: b,
  1522. }
  1523. return s.configure(ctx, cc)
  1524. }
  1525. func (s *EtcdServer) setCommittedIndex(v uint64) {
  1526. atomic.StoreUint64(&s.committedIndex, v)
  1527. }
  1528. func (s *EtcdServer) getCommittedIndex() uint64 {
  1529. return atomic.LoadUint64(&s.committedIndex)
  1530. }
  1531. func (s *EtcdServer) setAppliedIndex(v uint64) {
  1532. atomic.StoreUint64(&s.appliedIndex, v)
  1533. }
  1534. func (s *EtcdServer) getAppliedIndex() uint64 {
  1535. return atomic.LoadUint64(&s.appliedIndex)
  1536. }
  1537. func (s *EtcdServer) setTerm(v uint64) {
  1538. atomic.StoreUint64(&s.term, v)
  1539. }
  1540. func (s *EtcdServer) getTerm() uint64 {
  1541. return atomic.LoadUint64(&s.term)
  1542. }
  1543. func (s *EtcdServer) setLead(v uint64) {
  1544. atomic.StoreUint64(&s.lead, v)
  1545. }
  1546. func (s *EtcdServer) getLead() uint64 {
  1547. return atomic.LoadUint64(&s.lead)
  1548. }
  1549. func (s *EtcdServer) leaderChangedNotify() <-chan struct{} {
  1550. s.leaderChangedMu.RLock()
  1551. defer s.leaderChangedMu.RUnlock()
  1552. return s.leaderChanged
  1553. }
  1554. // RaftStatusGetter represents etcd server and Raft progress.
  1555. type RaftStatusGetter interface {
  1556. ID() types.ID
  1557. Leader() types.ID
  1558. CommittedIndex() uint64
  1559. AppliedIndex() uint64
  1560. Term() uint64
  1561. }
  1562. func (s *EtcdServer) ID() types.ID { return s.id }
  1563. func (s *EtcdServer) Leader() types.ID { return types.ID(s.getLead()) }
  1564. func (s *EtcdServer) Lead() uint64 { return s.getLead() }
  1565. func (s *EtcdServer) CommittedIndex() uint64 { return s.getCommittedIndex() }
  1566. func (s *EtcdServer) AppliedIndex() uint64 { return s.getAppliedIndex() }
  1567. func (s *EtcdServer) Term() uint64 { return s.getTerm() }
  1568. type confChangeResponse struct {
  1569. membs []*membership.Member
  1570. err error
  1571. }
  1572. // configure sends a configuration change through consensus and
  1573. // then waits for it to be applied to the server. It
  1574. // will block until the change is performed or there is an error.
  1575. func (s *EtcdServer) configure(ctx context.Context, cc raftpb.ConfChange) ([]*membership.Member, error) {
  1576. cc.ID = s.reqIDGen.Next()
  1577. ch := s.w.Register(cc.ID)
  1578. start := time.Now()
  1579. if err := s.r.ProposeConfChange(ctx, cc); err != nil {
  1580. s.w.Trigger(cc.ID, nil)
  1581. return nil, err
  1582. }
  1583. select {
  1584. case x := <-ch:
  1585. if x == nil {
  1586. if lg := s.getLogger(); lg != nil {
  1587. lg.Panic("failed to configure")
  1588. } else {
  1589. plog.Panicf("configure trigger value should never be nil")
  1590. }
  1591. }
  1592. resp := x.(*confChangeResponse)
  1593. if lg := s.getLogger(); lg != nil {
  1594. lg.Info(
  1595. "applied a configuration change through raft",
  1596. zap.String("local-member-id", s.ID().String()),
  1597. zap.String("raft-conf-change", cc.Type.String()),
  1598. zap.String("raft-conf-change-node-id", types.ID(cc.NodeID).String()),
  1599. )
  1600. }
  1601. return resp.membs, resp.err
  1602. case <-ctx.Done():
  1603. s.w.Trigger(cc.ID, nil) // GC wait
  1604. return nil, s.parseProposeCtxErr(ctx.Err(), start)
  1605. case <-s.stopping:
  1606. return nil, ErrStopped
  1607. }
  1608. }
  1609. // sync proposes a SYNC request and is non-blocking.
  1610. // This makes no guarantee that the request will be proposed or performed.
  1611. // The request will be canceled after the given timeout.
  1612. func (s *EtcdServer) sync(timeout time.Duration) {
  1613. req := pb.Request{
  1614. Method: "SYNC",
  1615. ID: s.reqIDGen.Next(),
  1616. Time: time.Now().UnixNano(),
  1617. }
  1618. data := pbutil.MustMarshal(&req)
  1619. // There is no promise that node has leader when do SYNC request,
  1620. // so it uses goroutine to propose.
  1621. ctx, cancel := context.WithTimeout(s.ctx, timeout)
  1622. s.goAttach(func() {
  1623. s.r.Propose(ctx, data)
  1624. cancel()
  1625. })
  1626. }
  1627. // publish registers server information into the cluster. The information
  1628. // is the JSON representation of this server's member struct, updated with the
  1629. // static clientURLs of the server.
  1630. // The function keeps attempting to register until it succeeds,
  1631. // or its server is stopped.
  1632. func (s *EtcdServer) publish(timeout time.Duration) {
  1633. b, err := json.Marshal(s.attributes)
  1634. if err != nil {
  1635. if lg := s.getLogger(); lg != nil {
  1636. lg.Panic("failed to marshal JSON", zap.Error(err))
  1637. } else {
  1638. plog.Panicf("json marshal error: %v", err)
  1639. }
  1640. return
  1641. }
  1642. req := pb.Request{
  1643. Method: "PUT",
  1644. Path: membership.MemberAttributesStorePath(s.id),
  1645. Val: string(b),
  1646. }
  1647. for {
  1648. ctx, cancel := context.WithTimeout(s.ctx, timeout)
  1649. _, err := s.Do(ctx, req)
  1650. cancel()
  1651. switch err {
  1652. case nil:
  1653. close(s.readych)
  1654. if lg := s.getLogger(); lg != nil {
  1655. lg.Info(
  1656. "published local member to cluster through raft",
  1657. zap.String("local-member-id", s.ID().String()),
  1658. zap.String("local-member-attributes", fmt.Sprintf("%+v", s.attributes)),
  1659. zap.String("request-path", req.Path),
  1660. zap.String("cluster-id", s.cluster.ID().String()),
  1661. zap.Duration("publish-timeout", timeout),
  1662. )
  1663. } else {
  1664. plog.Infof("published %+v to cluster %s", s.attributes, s.cluster.ID())
  1665. }
  1666. return
  1667. case ErrStopped:
  1668. if lg := s.getLogger(); lg != nil {
  1669. lg.Warn(
  1670. "stopped publish because server is stopped",
  1671. zap.String("local-member-id", s.ID().String()),
  1672. zap.String("local-member-attributes", fmt.Sprintf("%+v", s.attributes)),
  1673. zap.Duration("publish-timeout", timeout),
  1674. zap.Error(err),
  1675. )
  1676. } else {
  1677. plog.Infof("aborting publish because server is stopped")
  1678. }
  1679. return
  1680. default:
  1681. if lg := s.getLogger(); lg != nil {
  1682. lg.Warn(
  1683. "failed to publish local member to cluster through raft",
  1684. zap.String("local-member-id", s.ID().String()),
  1685. zap.String("local-member-attributes", fmt.Sprintf("%+v", s.attributes)),
  1686. zap.String("request-path", req.Path),
  1687. zap.Duration("publish-timeout", timeout),
  1688. zap.Error(err),
  1689. )
  1690. } else {
  1691. plog.Errorf("publish error: %v", err)
  1692. }
  1693. }
  1694. }
  1695. }
  1696. func (s *EtcdServer) sendMergedSnap(merged snap.Message) {
  1697. atomic.AddInt64(&s.inflightSnapshots, 1)
  1698. lg := s.getLogger()
  1699. fields := []zap.Field{
  1700. zap.String("from", s.ID().String()),
  1701. zap.String("to", types.ID(merged.To).String()),
  1702. zap.Int64("bytes", merged.TotalSize),
  1703. zap.String("size", humanize.Bytes(uint64(merged.TotalSize))),
  1704. }
  1705. now := time.Now()
  1706. s.r.transport.SendSnapshot(merged)
  1707. if lg != nil {
  1708. lg.Info("sending merged snapshot", fields...)
  1709. }
  1710. s.goAttach(func() {
  1711. select {
  1712. case ok := <-merged.CloseNotify():
  1713. // delay releasing inflight snapshot for another 30 seconds to
  1714. // block log compaction.
  1715. // If the follower still fails to catch up, it is probably just too slow
  1716. // to catch up. We cannot avoid the snapshot cycle anyway.
  1717. if ok {
  1718. select {
  1719. case <-time.After(releaseDelayAfterSnapshot):
  1720. case <-s.stopping:
  1721. }
  1722. }
  1723. atomic.AddInt64(&s.inflightSnapshots, -1)
  1724. if lg != nil {
  1725. lg.Info("sent merged snapshot", append(fields, zap.Duration("took", time.Since(now)))...)
  1726. }
  1727. case <-s.stopping:
  1728. if lg != nil {
  1729. lg.Warn("canceled sending merged snapshot; server stopping", fields...)
  1730. }
  1731. return
  1732. }
  1733. })
  1734. }
  1735. // apply takes entries received from Raft (after it has been committed) and
  1736. // applies them to the current state of the EtcdServer.
  1737. // The given entries should not be empty.
  1738. func (s *EtcdServer) apply(
  1739. es []raftpb.Entry,
  1740. confState *raftpb.ConfState,
  1741. ) (appliedt uint64, appliedi uint64, shouldStop bool) {
  1742. for i := range es {
  1743. e := es[i]
  1744. switch e.Type {
  1745. case raftpb.EntryNormal:
  1746. s.applyEntryNormal(&e)
  1747. s.setAppliedIndex(e.Index)
  1748. s.setTerm(e.Term)
  1749. case raftpb.EntryConfChange:
  1750. // set the consistent index of current executing entry
  1751. if e.Index > s.consistIndex.ConsistentIndex() {
  1752. s.consistIndex.setConsistentIndex(e.Index)
  1753. }
  1754. var cc raftpb.ConfChange
  1755. pbutil.MustUnmarshal(&cc, e.Data)
  1756. removedSelf, err := s.applyConfChange(cc, confState)
  1757. s.setAppliedIndex(e.Index)
  1758. s.setTerm(e.Term)
  1759. shouldStop = shouldStop || removedSelf
  1760. s.w.Trigger(cc.ID, &confChangeResponse{s.cluster.Members(), err})
  1761. default:
  1762. if lg := s.getLogger(); lg != nil {
  1763. lg.Panic(
  1764. "unknown entry type; must be either EntryNormal or EntryConfChange",
  1765. zap.String("type", e.Type.String()),
  1766. )
  1767. } else {
  1768. plog.Panicf("entry type should be either EntryNormal or EntryConfChange")
  1769. }
  1770. }
  1771. appliedi, appliedt = e.Index, e.Term
  1772. }
  1773. return appliedt, appliedi, shouldStop
  1774. }
  1775. // applyEntryNormal apples an EntryNormal type raftpb request to the EtcdServer
  1776. func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) {
  1777. shouldApplyV3 := false
  1778. if e.Index > s.consistIndex.ConsistentIndex() {
  1779. // set the consistent index of current executing entry
  1780. s.consistIndex.setConsistentIndex(e.Index)
  1781. shouldApplyV3 = true
  1782. }
  1783. // raft state machine may generate noop entry when leader confirmation.
  1784. // skip it in advance to avoid some potential bug in the future
  1785. if len(e.Data) == 0 {
  1786. select {
  1787. case s.forceVersionC <- struct{}{}:
  1788. default:
  1789. }
  1790. // promote lessor when the local member is leader and finished
  1791. // applying all entries from the last term.
  1792. if s.isLeader() {
  1793. s.lessor.Promote(s.Cfg.electionTimeout())
  1794. }
  1795. return
  1796. }
  1797. var raftReq pb.InternalRaftRequest
  1798. if !pbutil.MaybeUnmarshal(&raftReq, e.Data) { // backward compatible
  1799. var r pb.Request
  1800. rp := &r
  1801. pbutil.MustUnmarshal(rp, e.Data)
  1802. s.w.Trigger(r.ID, s.applyV2Request((*RequestV2)(rp)))
  1803. return
  1804. }
  1805. if raftReq.V2 != nil {
  1806. req := (*RequestV2)(raftReq.V2)
  1807. s.w.Trigger(req.ID, s.applyV2Request(req))
  1808. return
  1809. }
  1810. // do not re-apply applied entries.
  1811. if !shouldApplyV3 {
  1812. return
  1813. }
  1814. id := raftReq.ID
  1815. if id == 0 {
  1816. id = raftReq.Header.ID
  1817. }
  1818. var ar *applyResult
  1819. needResult := s.w.IsRegistered(id)
  1820. if needResult || !noSideEffect(&raftReq) {
  1821. if !needResult && raftReq.Txn != nil {
  1822. removeNeedlessRangeReqs(raftReq.Txn)
  1823. }
  1824. ar = s.applyV3.Apply(&raftReq)
  1825. }
  1826. if ar == nil {
  1827. return
  1828. }
  1829. if ar.err != ErrNoSpace || len(s.alarmStore.Get(pb.AlarmType_NOSPACE)) > 0 {
  1830. s.w.Trigger(id, ar)
  1831. return
  1832. }
  1833. if lg := s.getLogger(); lg != nil {
  1834. lg.Warn(
  1835. "message exceeded backend quota; raising alarm",
  1836. zap.Int64("quota-size-bytes", s.Cfg.QuotaBackendBytes),
  1837. zap.String("quota-size", humanize.Bytes(uint64(s.Cfg.QuotaBackendBytes))),
  1838. zap.Error(ar.err),
  1839. )
  1840. } else {
  1841. plog.Errorf("applying raft message exceeded backend quota")
  1842. }
  1843. s.goAttach(func() {
  1844. a := &pb.AlarmRequest{
  1845. MemberID: uint64(s.ID()),
  1846. Action: pb.AlarmRequest_ACTIVATE,
  1847. Alarm: pb.AlarmType_NOSPACE,
  1848. }
  1849. s.raftRequest(s.ctx, pb.InternalRaftRequest{Alarm: a})
  1850. s.w.Trigger(id, ar)
  1851. })
  1852. }
  1853. // applyConfChange applies a ConfChange to the server. It is only
  1854. // invoked with a ConfChange that has already passed through Raft
  1855. func (s *EtcdServer) applyConfChange(cc raftpb.ConfChange, confState *raftpb.ConfState) (bool, error) {
  1856. if err := s.cluster.ValidateConfigurationChange(cc); err != nil {
  1857. cc.NodeID = raft.None
  1858. s.r.ApplyConfChange(cc)
  1859. return false, err
  1860. }
  1861. lg := s.getLogger()
  1862. *confState = *s.r.ApplyConfChange(cc)
  1863. switch cc.Type {
  1864. case raftpb.ConfChangeAddNode, raftpb.ConfChangeAddLearnerNode:
  1865. confChangeContext := new(membership.ConfigChangeContext)
  1866. if err := json.Unmarshal(cc.Context, confChangeContext); err != nil {
  1867. if lg != nil {
  1868. lg.Panic("failed to unmarshal member", zap.Error(err))
  1869. } else {
  1870. plog.Panicf("unmarshal member should never fail: %v", err)
  1871. }
  1872. }
  1873. if cc.NodeID != uint64(confChangeContext.Member.ID) {
  1874. if lg != nil {
  1875. lg.Panic(
  1876. "got different member ID",
  1877. zap.String("member-id-from-config-change-entry", types.ID(cc.NodeID).String()),
  1878. zap.String("member-id-from-message", confChangeContext.Member.ID.String()),
  1879. )
  1880. } else {
  1881. plog.Panicf("nodeID should always be equal to member ID")
  1882. }
  1883. }
  1884. if confChangeContext.IsPromote {
  1885. s.cluster.PromoteMember(confChangeContext.Member.ID)
  1886. } else {
  1887. s.cluster.AddMember(&confChangeContext.Member)
  1888. if confChangeContext.Member.ID != s.id {
  1889. s.r.transport.AddPeer(confChangeContext.Member.ID, confChangeContext.PeerURLs)
  1890. }
  1891. }
  1892. case raftpb.ConfChangeRemoveNode:
  1893. id := types.ID(cc.NodeID)
  1894. s.cluster.RemoveMember(id)
  1895. if id == s.id {
  1896. return true, nil
  1897. }
  1898. s.r.transport.RemovePeer(id)
  1899. case raftpb.ConfChangeUpdateNode:
  1900. m := new(membership.Member)
  1901. if err := json.Unmarshal(cc.Context, m); err != nil {
  1902. if lg != nil {
  1903. lg.Panic("failed to unmarshal member", zap.Error(err))
  1904. } else {
  1905. plog.Panicf("unmarshal member should never fail: %v", err)
  1906. }
  1907. }
  1908. if cc.NodeID != uint64(m.ID) {
  1909. if lg != nil {
  1910. lg.Panic(
  1911. "got different member ID",
  1912. zap.String("member-id-from-config-change-entry", types.ID(cc.NodeID).String()),
  1913. zap.String("member-id-from-message", m.ID.String()),
  1914. )
  1915. } else {
  1916. plog.Panicf("nodeID should always be equal to member ID")
  1917. }
  1918. }
  1919. s.cluster.UpdateRaftAttributes(m.ID, m.RaftAttributes)
  1920. if m.ID != s.id {
  1921. s.r.transport.UpdatePeer(m.ID, m.PeerURLs)
  1922. }
  1923. }
  1924. return false, nil
  1925. }
  1926. // TODO: non-blocking snapshot
  1927. func (s *EtcdServer) snapshot(snapi uint64, confState raftpb.ConfState) {
  1928. clone := s.v2store.Clone()
  1929. // commit kv to write metadata (for example: consistent index) to disk.
  1930. // KV().commit() updates the consistent index in backend.
  1931. // All operations that update consistent index must be called sequentially
  1932. // from applyAll function.
  1933. // So KV().Commit() cannot run in parallel with apply. It has to be called outside
  1934. // the go routine created below.
  1935. s.KV().Commit()
  1936. s.goAttach(func() {
  1937. lg := s.getLogger()
  1938. d, err := clone.SaveNoCopy()
  1939. // TODO: current store will never fail to do a snapshot
  1940. // what should we do if the store might fail?
  1941. if err != nil {
  1942. if lg != nil {
  1943. lg.Panic("failed to save v2 store", zap.Error(err))
  1944. } else {
  1945. plog.Panicf("store save should never fail: %v", err)
  1946. }
  1947. }
  1948. snap, err := s.r.raftStorage.CreateSnapshot(snapi, &confState, d)
  1949. if err != nil {
  1950. // the snapshot was done asynchronously with the progress of raft.
  1951. // raft might have already got a newer snapshot.
  1952. if err == raft.ErrSnapOutOfDate {
  1953. return
  1954. }
  1955. if lg != nil {
  1956. lg.Panic("failed to create snapshot", zap.Error(err))
  1957. } else {
  1958. plog.Panicf("unexpected create snapshot error %v", err)
  1959. }
  1960. }
  1961. // SaveSnap saves the snapshot and releases the locked wal files
  1962. // to the snapshot index.
  1963. if err = s.r.storage.SaveSnap(snap); err != nil {
  1964. if lg != nil {
  1965. lg.Panic("failed to save snapshot", zap.Error(err))
  1966. } else {
  1967. plog.Fatalf("save snapshot error: %v", err)
  1968. }
  1969. }
  1970. if lg != nil {
  1971. lg.Info(
  1972. "saved snapshot",
  1973. zap.Uint64("snapshot-index", snap.Metadata.Index),
  1974. )
  1975. } else {
  1976. plog.Infof("saved snapshot at index %d", snap.Metadata.Index)
  1977. }
  1978. // When sending a snapshot, etcd will pause compaction.
  1979. // After receives a snapshot, the slow follower needs to get all the entries right after
  1980. // the snapshot sent to catch up. If we do not pause compaction, the log entries right after
  1981. // the snapshot sent might already be compacted. It happens when the snapshot takes long time
  1982. // to send and save. Pausing compaction avoids triggering a snapshot sending cycle.
  1983. if atomic.LoadInt64(&s.inflightSnapshots) != 0 {
  1984. if lg != nil {
  1985. lg.Info("skip compaction since there is an inflight snapshot")
  1986. } else {
  1987. plog.Infof("skip compaction since there is an inflight snapshot")
  1988. }
  1989. return
  1990. }
  1991. // keep some in memory log entries for slow followers.
  1992. compacti := uint64(1)
  1993. if snapi > s.Cfg.SnapshotCatchUpEntries {
  1994. compacti = snapi - s.Cfg.SnapshotCatchUpEntries
  1995. }
  1996. err = s.r.raftStorage.Compact(compacti)
  1997. if err != nil {
  1998. // the compaction was done asynchronously with the progress of raft.
  1999. // raft log might already been compact.
  2000. if err == raft.ErrCompacted {
  2001. return
  2002. }
  2003. if lg != nil {
  2004. lg.Panic("failed to compact", zap.Error(err))
  2005. } else {
  2006. plog.Panicf("unexpected compaction error %v", err)
  2007. }
  2008. }
  2009. if lg != nil {
  2010. lg.Info(
  2011. "compacted Raft logs",
  2012. zap.Uint64("compact-index", compacti),
  2013. )
  2014. } else {
  2015. plog.Infof("compacted raft log at %d", compacti)
  2016. }
  2017. })
  2018. }
  2019. // CutPeer drops messages to the specified peer.
  2020. func (s *EtcdServer) CutPeer(id types.ID) {
  2021. tr, ok := s.r.transport.(*rafthttp.Transport)
  2022. if ok {
  2023. tr.CutPeer(id)
  2024. }
  2025. }
  2026. // MendPeer recovers the message dropping behavior of the given peer.
  2027. func (s *EtcdServer) MendPeer(id types.ID) {
  2028. tr, ok := s.r.transport.(*rafthttp.Transport)
  2029. if ok {
  2030. tr.MendPeer(id)
  2031. }
  2032. }
  2033. func (s *EtcdServer) PauseSending() { s.r.pauseSending() }
  2034. func (s *EtcdServer) ResumeSending() { s.r.resumeSending() }
  2035. func (s *EtcdServer) ClusterVersion() *semver.Version {
  2036. if s.cluster == nil {
  2037. return nil
  2038. }
  2039. return s.cluster.Version()
  2040. }
  2041. // monitorVersions checks the member's version every monitorVersionInterval.
  2042. // It updates the cluster version if all members agrees on a higher one.
  2043. // It prints out log if there is a member with a higher version than the
  2044. // local version.
  2045. func (s *EtcdServer) monitorVersions() {
  2046. for {
  2047. select {
  2048. case <-s.forceVersionC:
  2049. case <-time.After(monitorVersionInterval):
  2050. case <-s.stopping:
  2051. return
  2052. }
  2053. if s.Leader() != s.ID() {
  2054. continue
  2055. }
  2056. v := decideClusterVersion(s.getLogger(), getVersions(s.getLogger(), s.cluster, s.id, s.peerRt))
  2057. if v != nil {
  2058. // only keep major.minor version for comparison
  2059. v = &semver.Version{
  2060. Major: v.Major,
  2061. Minor: v.Minor,
  2062. }
  2063. }
  2064. // if the current version is nil:
  2065. // 1. use the decided version if possible
  2066. // 2. or use the min cluster version
  2067. if s.cluster.Version() == nil {
  2068. verStr := version.MinClusterVersion
  2069. if v != nil {
  2070. verStr = v.String()
  2071. }
  2072. s.goAttach(func() { s.updateClusterVersion(verStr) })
  2073. continue
  2074. }
  2075. // update cluster version only if the decided version is greater than
  2076. // the current cluster version
  2077. if v != nil && s.cluster.Version().LessThan(*v) {
  2078. s.goAttach(func() { s.updateClusterVersion(v.String()) })
  2079. }
  2080. }
  2081. }
  2082. func (s *EtcdServer) updateClusterVersion(ver string) {
  2083. lg := s.getLogger()
  2084. if s.cluster.Version() == nil {
  2085. if lg != nil {
  2086. lg.Info(
  2087. "setting up initial cluster version",
  2088. zap.String("cluster-version", version.Cluster(ver)),
  2089. )
  2090. } else {
  2091. plog.Infof("setting up the initial cluster version to %s", version.Cluster(ver))
  2092. }
  2093. } else {
  2094. if lg != nil {
  2095. lg.Info(
  2096. "updating cluster version",
  2097. zap.String("from", version.Cluster(s.cluster.Version().String())),
  2098. zap.String("to", version.Cluster(ver)),
  2099. )
  2100. } else {
  2101. plog.Infof("updating the cluster version from %s to %s", version.Cluster(s.cluster.Version().String()), version.Cluster(ver))
  2102. }
  2103. }
  2104. req := pb.Request{
  2105. Method: "PUT",
  2106. Path: membership.StoreClusterVersionKey(),
  2107. Val: ver,
  2108. }
  2109. ctx, cancel := context.WithTimeout(s.ctx, s.Cfg.ReqTimeout())
  2110. _, err := s.Do(ctx, req)
  2111. cancel()
  2112. switch err {
  2113. case nil:
  2114. if lg != nil {
  2115. lg.Info("cluster version is updated", zap.String("cluster-version", version.Cluster(ver)))
  2116. }
  2117. return
  2118. case ErrStopped:
  2119. if lg != nil {
  2120. lg.Warn("aborting cluster version update; server is stopped", zap.Error(err))
  2121. } else {
  2122. plog.Infof("aborting update cluster version because server is stopped")
  2123. }
  2124. return
  2125. default:
  2126. if lg != nil {
  2127. lg.Warn("failed to update cluster version", zap.Error(err))
  2128. } else {
  2129. plog.Errorf("error updating cluster version (%v)", err)
  2130. }
  2131. }
  2132. }
  2133. func (s *EtcdServer) parseProposeCtxErr(err error, start time.Time) error {
  2134. switch err {
  2135. case context.Canceled:
  2136. return ErrCanceled
  2137. case context.DeadlineExceeded:
  2138. s.leadTimeMu.RLock()
  2139. curLeadElected := s.leadElectedTime
  2140. s.leadTimeMu.RUnlock()
  2141. prevLeadLost := curLeadElected.Add(-2 * time.Duration(s.Cfg.ElectionTicks) * time.Duration(s.Cfg.TickMs) * time.Millisecond)
  2142. if start.After(prevLeadLost) && start.Before(curLeadElected) {
  2143. return ErrTimeoutDueToLeaderFail
  2144. }
  2145. lead := types.ID(s.getLead())
  2146. switch lead {
  2147. case types.ID(raft.None):
  2148. // TODO: return error to specify it happens because the cluster does not have leader now
  2149. case s.ID():
  2150. if !isConnectedToQuorumSince(s.r.transport, start, s.ID(), s.cluster.Members()) {
  2151. return ErrTimeoutDueToConnectionLost
  2152. }
  2153. default:
  2154. if !isConnectedSince(s.r.transport, start, lead) {
  2155. return ErrTimeoutDueToConnectionLost
  2156. }
  2157. }
  2158. return ErrTimeout
  2159. default:
  2160. return err
  2161. }
  2162. }
  2163. func (s *EtcdServer) KV() mvcc.ConsistentWatchableKV { return s.kv }
  2164. func (s *EtcdServer) Backend() backend.Backend {
  2165. s.bemu.Lock()
  2166. defer s.bemu.Unlock()
  2167. return s.be
  2168. }
  2169. func (s *EtcdServer) AuthStore() auth.AuthStore { return s.authStore }
  2170. func (s *EtcdServer) restoreAlarms() error {
  2171. s.applyV3 = s.newApplierV3()
  2172. as, err := v3alarm.NewAlarmStore(s)
  2173. if err != nil {
  2174. return err
  2175. }
  2176. s.alarmStore = as
  2177. if len(as.Get(pb.AlarmType_NOSPACE)) > 0 {
  2178. s.applyV3 = newApplierV3Capped(s.applyV3)
  2179. }
  2180. if len(as.Get(pb.AlarmType_CORRUPT)) > 0 {
  2181. s.applyV3 = newApplierV3Corrupt(s.applyV3)
  2182. }
  2183. return nil
  2184. }
  2185. // goAttach creates a goroutine on a given function and tracks it using
  2186. // the etcdserver waitgroup.
  2187. func (s *EtcdServer) goAttach(f func()) {
  2188. s.wgMu.RLock() // this blocks with ongoing close(s.stopping)
  2189. defer s.wgMu.RUnlock()
  2190. select {
  2191. case <-s.stopping:
  2192. if lg := s.getLogger(); lg != nil {
  2193. lg.Warn("server has stopped; skipping goAttach")
  2194. } else {
  2195. plog.Warning("server has stopped (skipping goAttach)")
  2196. }
  2197. return
  2198. default:
  2199. }
  2200. // now safe to add since waitgroup wait has not started yet
  2201. s.wg.Add(1)
  2202. go func() {
  2203. defer s.wg.Done()
  2204. f()
  2205. }()
  2206. }
  2207. func (s *EtcdServer) Alarms() []*pb.AlarmMember {
  2208. return s.alarmStore.Get(pb.AlarmType_NONE)
  2209. }
  2210. func (s *EtcdServer) Logger() *zap.Logger {
  2211. return s.lg
  2212. }
  2213. // IsLearner returns if the local member is raft learner
  2214. func (s *EtcdServer) IsLearner() bool {
  2215. return s.cluster.IsLocalMemberLearner()
  2216. }