// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package etcdserver
import (
	"context"
	"encoding/json"
	"expvar"
	"fmt"
	"math"
	"math/rand"
	"net/http"
	"os"
	"path"
	"regexp"
	"sync"
	"sync/atomic"
	"time"

	"go.etcd.io/etcd/auth"
	"go.etcd.io/etcd/etcdserver/api"
	"go.etcd.io/etcd/etcdserver/api/membership"
	"go.etcd.io/etcd/etcdserver/api/rafthttp"
	"go.etcd.io/etcd/etcdserver/api/snap"
	"go.etcd.io/etcd/etcdserver/api/v2discovery"
	"go.etcd.io/etcd/etcdserver/api/v2http/httptypes"
	stats "go.etcd.io/etcd/etcdserver/api/v2stats"
	"go.etcd.io/etcd/etcdserver/api/v2store"
	"go.etcd.io/etcd/etcdserver/api/v3alarm"
	"go.etcd.io/etcd/etcdserver/api/v3compactor"
	pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
	"go.etcd.io/etcd/lease"
	"go.etcd.io/etcd/lease/leasehttp"
	"go.etcd.io/etcd/mvcc"
	"go.etcd.io/etcd/mvcc/backend"
	"go.etcd.io/etcd/pkg/fileutil"
	"go.etcd.io/etcd/pkg/idutil"
	"go.etcd.io/etcd/pkg/pbutil"
	"go.etcd.io/etcd/pkg/runtime"
	"go.etcd.io/etcd/pkg/schedule"
	"go.etcd.io/etcd/pkg/types"
	"go.etcd.io/etcd/pkg/wait"
	"go.etcd.io/etcd/raft"
	"go.etcd.io/etcd/raft/raftpb"
	"go.etcd.io/etcd/version"
	"go.etcd.io/etcd/wal"

	"github.com/coreos/go-semver/semver"
	"github.com/coreos/pkg/capnslog"
	humanize "github.com/dustin/go-humanize"
	"github.com/prometheus/client_golang/prometheus"
	"go.uber.org/zap"
)
const (
	DefaultSnapshotCount = 100000

	// DefaultSnapshotCatchUpEntries is the number of entries for a slow follower
	// to catch up after compacting the raft storage entries.
	// We expect the follower to have millisecond-level latency with the leader.
	// The max throughput is around 10K; keeping 5K entries is enough to help a
	// follower catch up.
	DefaultSnapshotCatchUpEntries uint64 = 5000

	StoreClusterPrefix = "/0"
	StoreKeysPrefix    = "/1"

	// HealthInterval is the minimum time the cluster should be healthy
	// before accepting add-member requests.
	HealthInterval = 5 * time.Second

	purgeFileInterval = 30 * time.Second

	// monitorVersionInterval should be smaller than the timeout
	// on the connection; otherwise we will not be able to reuse the
	// connection (since it will time out).
	monitorVersionInterval = rafthttp.ConnWriteTimeout - time.Second

	// maxInFlightMsgSnap is the max number of in-flight snapshot messages
	// etcdserver allows. This number is more than enough for most clusters
	// with 5 machines.
	maxInFlightMsgSnap = 16

	releaseDelayAfterSnapshot = 30 * time.Second

	// maxPendingRevokes is the maximum number of outstanding expired lease revocations.
	maxPendingRevokes = 16

	recommendedMaxRequestBytes = 10 * 1024 * 1024

	readyPercent = 0.9
)
var (
	plog = capnslog.NewPackageLogger("go.etcd.io/etcd", "etcdserver")

	storeMemberAttributeRegexp = regexp.MustCompile(path.Join(membership.StoreMembersPrefix, "[[:xdigit:]]{1,16}", "attributes"))
)
func init() {
	rand.Seed(time.Now().UnixNano())

	expvar.Publish(
		"file_descriptor_limit",
		expvar.Func(
			func() interface{} {
				n, _ := runtime.FDLimit()
				return n
			},
		),
	)
}
type Response struct {
	Term    uint64
	Index   uint64
	Event   *v2store.Event
	Watcher v2store.Watcher
	Err     error
}

type ServerV2 interface {
	Server
	Leader() types.ID

	// Do takes a V2 request and attempts to fulfill it, returning a Response.
	Do(ctx context.Context, r pb.Request) (Response, error)
	stats.Stats
	ClientCertAuthEnabled() bool
}

type ServerV3 interface {
	Server
	RaftStatusGetter
}

func (s *EtcdServer) ClientCertAuthEnabled() bool { return s.Cfg.ClientCertAuthEnabled }

type Server interface {
	// AddMember attempts to add a member into the cluster. It will return
	// ErrIDRemoved if the member ID has been removed from the cluster, or
	// ErrIDExists if the member ID already exists in the cluster.
	AddMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error)
	// RemoveMember attempts to remove a member from the cluster. It will
	// return ErrIDRemoved if the member ID has been removed from the cluster,
	// or ErrIDNotFound if the member ID is not in the cluster.
	RemoveMember(ctx context.Context, id uint64) ([]*membership.Member, error)
	// UpdateMember attempts to update an existing member in the cluster. It will
	// return ErrIDNotFound if the member ID does not exist.
	UpdateMember(ctx context.Context, updateMemb membership.Member) ([]*membership.Member, error)
	// PromoteMember attempts to promote a non-voting node to a voting node. It will
	// return ErrIDNotFound if the member ID does not exist,
	// ErrLearnerNotReady if the member is not ready, or
	// ErrMemberNotLearner if the member is not a learner.
	PromoteMember(ctx context.Context, id uint64) ([]*membership.Member, error)

	// ClusterVersion is the cluster-wide minimum major.minor version.
	// Cluster version is set to the min version that an etcd member is
	// compatible with when it first bootstraps.
	//
	// ClusterVersion is nil until the cluster is bootstrapped (has a quorum).
	//
	// During a rolling upgrade, the ClusterVersion will be updated
	// automatically after a sync (5 seconds by default).
	//
	// The API/raft component can utilize ClusterVersion to determine if
	// it can accept a client request or a raft RPC.
	// NOTE: ClusterVersion might be nil when etcd 2.1 works with etcd 2.0 and
	// the leader is etcd 2.0. An etcd 2.0 leader will not update clusterVersion
	// since this feature was introduced after 2.0.
	ClusterVersion() *semver.Version
	Cluster() api.Cluster
	Alarms() []*pb.AlarmMember
}
// EtcdServer is the production implementation of the Server interface.
type EtcdServer struct {
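	// NOTE (editorial): the fields below are accessed with sync/atomic, whose
	// 64-bit operations require 64-bit alignment. On 32-bit platforms only the
	// first word of an allocated struct is guaranteed to be aligned, which is
	// presumably why these fields are kept first, per the alignment comments.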
	// inflightSnapshots holds the number of snapshots currently in flight.
	inflightSnapshots int64  // must use atomic operations to access; keep 64-bit aligned.
	appliedIndex      uint64 // must use atomic operations to access; keep 64-bit aligned.
	committedIndex    uint64 // must use atomic operations to access; keep 64-bit aligned.
	term              uint64 // must use atomic operations to access; keep 64-bit aligned.
	lead              uint64 // must use atomic operations to access; keep 64-bit aligned.

	// consistIndex holds the offset of the currently executing entry.
	// It is initialized to 0 before executing any entry.
	consistIndex consistentIndex // must use atomic operations to access; keep 64-bit aligned.
	r            raftNode        // uses 64-bit atomics; keep 64-bit aligned.

	readych chan struct{}
	Cfg     ServerConfig

	lgMu *sync.RWMutex
	lg   *zap.Logger

	w wait.Wait

	readMu sync.RWMutex
	// the read routine notifies the etcd server that it is waiting for a read
	// by sending an empty struct to readwaitc
	readwaitc chan struct{}
	// readNotifier is used to notify the read routine that it can process the request
	// when there is no error
	readNotifier *notifier

	// stop signals that the run goroutine should shut down.
	stop chan struct{}
	// stopping is closed by the run goroutine on shutdown.
	stopping chan struct{}
	// done is closed when all goroutines from start() complete.
	done chan struct{}
	// leaderChanged is used to notify the linearizable read loop to drop the old read requests.
	leaderChanged   chan struct{}
	leaderChangedMu sync.RWMutex

	errorc     chan error
	id         types.ID
	attributes membership.Attributes

	cluster *membership.RaftCluster

	v2store     v2store.Store
	snapshotter *snap.Snapshotter

	applyV2 ApplierV2

	// applyV3 is the applier with auth and quotas
	applyV3 applierV3
	// applyV3Base is the core applier without auth or quotas
	applyV3Base applierV3
	applyWait   wait.WaitTime

	kv         mvcc.ConsistentWatchableKV
	lessor     lease.Lessor
	bemu       sync.Mutex
	be         backend.Backend
	authStore  auth.AuthStore
	alarmStore *v3alarm.AlarmStore

	stats  *stats.ServerStats
	lstats *stats.LeaderStats

	SyncTicker *time.Ticker
	// compactor is used to auto-compact the KV.
	compactor v3compactor.Compactor

	// peerRt is used to send requests (version, lease) to peers.
	peerRt   http.RoundTripper
	reqIDGen *idutil.Generator

	// forceVersionC is used to force the version monitor loop
	// to detect the cluster version immediately.
	forceVersionC chan struct{}

	// wgMu blocks concurrent waitgroup mutation while the server is stopping.
	wgMu sync.RWMutex
	// wg is used to wait for the goroutines that depend on the server state
	// to exit when stopping the server.
	wg sync.WaitGroup

	// ctx is used for etcd-initiated requests that may need to be canceled
	// on etcd server shutdown.
	ctx    context.Context
	cancel context.CancelFunc

	leadTimeMu      sync.RWMutex
	leadElectedTime time.Time

	*AccessController
}
// NewServer creates a new EtcdServer from the supplied configuration. The
// configuration is considered static for the lifetime of the EtcdServer.
func NewServer(cfg ServerConfig) (srv *EtcdServer, err error) {
	st := v2store.New(StoreClusterPrefix, StoreKeysPrefix)

	var (
		w  *wal.WAL
		n  raft.Node
		s  *raft.MemoryStorage
		id types.ID
		cl *membership.RaftCluster
	)

	if cfg.MaxRequestBytes > recommendedMaxRequestBytes {
		if cfg.Logger != nil {
			cfg.Logger.Warn(
				"exceeded recommended request limit",
				zap.Uint("max-request-bytes", cfg.MaxRequestBytes),
				zap.String("max-request-size", humanize.Bytes(uint64(cfg.MaxRequestBytes))),
				zap.Int("recommended-request-bytes", recommendedMaxRequestBytes),
				zap.String("recommended-request-size", humanize.Bytes(uint64(recommendedMaxRequestBytes))),
			)
		} else {
			plog.Warningf("MaxRequestBytes %v exceeds maximum recommended size %v", cfg.MaxRequestBytes, recommendedMaxRequestBytes)
		}
	}

	if terr := fileutil.TouchDirAll(cfg.DataDir); terr != nil {
		return nil, fmt.Errorf("cannot access data directory: %v", terr)
	}

	haveWAL := wal.Exist(cfg.WALDir())

	if err = fileutil.TouchDirAll(cfg.SnapDir()); err != nil {
		if cfg.Logger != nil {
			cfg.Logger.Fatal(
				"failed to create snapshot directory",
				zap.String("path", cfg.SnapDir()),
				zap.Error(err),
			)
		} else {
			plog.Fatalf("create snapshot directory error: %v", err)
		}
	}
	ss := snap.New(cfg.Logger, cfg.SnapDir())

	bepath := cfg.backendPath()
	beExist := fileutil.Exist(bepath)
	be := openBackend(cfg)

	defer func() {
		if err != nil {
			be.Close()
		}
	}()

	prt, err := rafthttp.NewRoundTripper(cfg.PeerTLSInfo, cfg.peerDialTimeout())
	if err != nil {
		return nil, err
	}
	var (
		remotes  []*membership.Member
		snapshot *raftpb.Snapshot
	)

	switch {
	case !haveWAL && !cfg.NewCluster:
		if err = cfg.VerifyJoinExisting(); err != nil {
			return nil, err
		}
		cl, err = membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, cfg.InitialPeerURLsMap)
		if err != nil {
			return nil, err
		}
		existingCluster, gerr := GetClusterFromRemotePeers(cfg.Logger, getRemotePeerURLs(cl, cfg.Name), prt)
		if gerr != nil {
			return nil, fmt.Errorf("cannot fetch cluster info from peer urls: %v", gerr)
		}
		if err = membership.ValidateClusterAndAssignIDs(cfg.Logger, cl, existingCluster); err != nil {
			return nil, fmt.Errorf("error validating peerURLs %s: %v", existingCluster, err)
		}
		if !isCompatibleWithCluster(cfg.Logger, cl, cl.MemberByName(cfg.Name).ID, prt) {
			return nil, fmt.Errorf("incompatible with current running cluster")
		}

		remotes = existingCluster.Members()
		cl.SetID(types.ID(0), existingCluster.ID())
		cl.SetStore(st)
		cl.SetBackend(be)
		id, n, s, w = startNode(cfg, cl, nil)
		cl.SetID(id, existingCluster.ID())

	case !haveWAL && cfg.NewCluster:
		if err = cfg.VerifyBootstrap(); err != nil {
			return nil, err
		}
		cl, err = membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, cfg.InitialPeerURLsMap)
		if err != nil {
			return nil, err
		}
		m := cl.MemberByName(cfg.Name)
		if isMemberBootstrapped(cfg.Logger, cl, cfg.Name, prt, cfg.bootstrapTimeout()) {
			return nil, fmt.Errorf("member %s has already been bootstrapped", m.ID)
		}
		if cfg.ShouldDiscover() {
			var str string
			str, err = v2discovery.JoinCluster(cfg.Logger, cfg.DiscoveryURL, cfg.DiscoveryProxy, m.ID, cfg.InitialPeerURLsMap.String())
			if err != nil {
				return nil, &DiscoveryError{Op: "join", Err: err}
			}
			var urlsmap types.URLsMap
			urlsmap, err = types.NewURLsMap(str)
			if err != nil {
				return nil, err
			}
			if checkDuplicateURL(urlsmap) {
				return nil, fmt.Errorf("discovery cluster %s has duplicate url", urlsmap)
			}
			if cl, err = membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, urlsmap); err != nil {
				return nil, err
			}
		}
		cl.SetStore(st)
		cl.SetBackend(be)
		id, n, s, w = startNode(cfg, cl, cl.MemberIDs())
		cl.SetID(id, cl.ID())

	case haveWAL:
		if err = fileutil.IsDirWriteable(cfg.MemberDir()); err != nil {
			return nil, fmt.Errorf("cannot write to member directory: %v", err)
		}
		if err = fileutil.IsDirWriteable(cfg.WALDir()); err != nil {
			return nil, fmt.Errorf("cannot write to WAL directory: %v", err)
		}
		if cfg.ShouldDiscover() {
			if cfg.Logger != nil {
				cfg.Logger.Warn(
					"discovery token is ignored since cluster already initialized; valid logs are found",
					zap.String("wal-dir", cfg.WALDir()),
				)
			} else {
				plog.Warningf("discovery token ignored since a cluster has already been initialized. Valid log found at %q", cfg.WALDir())
			}
		}
		snapshot, err = ss.Load()
		if err != nil && err != snap.ErrNoSnapshot {
			return nil, err
		}
		if snapshot != nil {
			if err = st.Recovery(snapshot.Data); err != nil {
				if cfg.Logger != nil {
					cfg.Logger.Panic("failed to recover from snapshot")
				} else {
					plog.Panicf("recovered store from snapshot error: %v", err)
				}
			}
			if cfg.Logger != nil {
				cfg.Logger.Info(
					"recovered v2 store from snapshot",
					zap.Uint64("snapshot-index", snapshot.Metadata.Index),
					zap.String("snapshot-size", humanize.Bytes(uint64(snapshot.Size()))),
				)
			} else {
				plog.Infof("recovered store from snapshot at index %d", snapshot.Metadata.Index)
			}

			if be, err = recoverSnapshotBackend(cfg, be, *snapshot); err != nil {
				if cfg.Logger != nil {
					cfg.Logger.Panic("failed to recover v3 backend from snapshot", zap.Error(err))
				} else {
					plog.Panicf("recovering backend from snapshot error: %v", err)
				}
			}
			if cfg.Logger != nil {
				s1, s2 := be.Size(), be.SizeInUse()
				cfg.Logger.Info(
					"recovered v3 backend from snapshot",
					zap.Int64("backend-size-bytes", s1),
					zap.String("backend-size", humanize.Bytes(uint64(s1))),
					zap.Int64("backend-size-in-use-bytes", s2),
					zap.String("backend-size-in-use", humanize.Bytes(uint64(s2))),
				)
			}
		}

		if !cfg.ForceNewCluster {
			id, cl, n, s, w = restartNode(cfg, snapshot)
		} else {
			id, cl, n, s, w = restartAsStandaloneNode(cfg, snapshot)
		}

		cl.SetStore(st)
		cl.SetBackend(be)
		cl.Recover(api.UpdateCapability)
		if cl.Version() != nil && !cl.Version().LessThan(semver.Version{Major: 3}) && !beExist {
			os.RemoveAll(bepath)
			return nil, fmt.Errorf("database file (%v) of the backend is missing", bepath)
		}

	default:
		return nil, fmt.Errorf("unsupported bootstrap config")
	}

	if terr := fileutil.TouchDirAll(cfg.MemberDir()); terr != nil {
		return nil, fmt.Errorf("cannot access member directory: %v", terr)
	}

	sstats := stats.NewServerStats(cfg.Name, id.String())
	lstats := stats.NewLeaderStats(id.String())

	heartbeat := time.Duration(cfg.TickMs) * time.Millisecond
	srv = &EtcdServer{
		readych:     make(chan struct{}),
		Cfg:         cfg,
		lgMu:        new(sync.RWMutex),
		lg:          cfg.Logger,
		errorc:      make(chan error, 1),
		v2store:     st,
		snapshotter: ss,
		r: *newRaftNode(
			raftNodeConfig{
				lg:          cfg.Logger,
				isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) },
				Node:        n,
				heartbeat:   heartbeat,
				raftStorage: s,
				storage:     NewStorage(w, ss),
			},
		),
		id:               id,
		attributes:       membership.Attributes{Name: cfg.Name, ClientURLs: cfg.ClientURLs.StringSlice()},
		cluster:          cl,
		stats:            sstats,
		lstats:           lstats,
		SyncTicker:       time.NewTicker(500 * time.Millisecond),
		peerRt:           prt,
		reqIDGen:         idutil.NewGenerator(uint16(id), time.Now()),
		forceVersionC:    make(chan struct{}),
		AccessController: &AccessController{CORS: cfg.CORS, HostWhitelist: cfg.HostWhitelist},
	}
	serverID.With(prometheus.Labels{"server_id": id.String()}).Set(1)

	srv.applyV2 = &applierV2store{store: srv.v2store, cluster: srv.cluster}

	srv.be = be
	minTTL := time.Duration((3*cfg.ElectionTicks)/2) * heartbeat
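	// NOTE (editorial): minTTL works out to roughly 1.5 election timeouts,
	// presumably so a lease granted just before an election cannot expire
	// before a new leader is established.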
	// always recover lessor before kv. When we recover the mvcc.KV it will reattach keys to its leases.
	// If we recover mvcc.KV first, it will attach the keys to the wrong lessor before it recovers.
	srv.lessor = lease.NewLessor(
		srv.getLogger(),
		srv.be,
		lease.LessorConfig{
			MinLeaseTTL:                int64(math.Ceil(minTTL.Seconds())),
			CheckpointInterval:         cfg.LeaseCheckpointInterval,
			ExpiredLeasesRetryInterval: srv.Cfg.ReqTimeout(),
		})
	srv.kv = mvcc.New(srv.getLogger(), srv.be, srv.lessor, &srv.consistIndex)
	if beExist {
		kvindex := srv.kv.ConsistentIndex()
		// TODO: remove kvindex != 0 checking when we do not expect users to upgrade
		// etcd from pre-3.0 release.
		if snapshot != nil && kvindex < snapshot.Metadata.Index {
			if kvindex != 0 {
				return nil, fmt.Errorf("database file (%v index %d) does not match with snapshot (index %d)", bepath, kvindex, snapshot.Metadata.Index)
			}
			if cfg.Logger != nil {
				cfg.Logger.Warn(
					"consistent index was never saved",
					zap.Uint64("snapshot-index", snapshot.Metadata.Index),
				)
			} else {
				plog.Warningf("consistent index never saved (snapshot index=%d)", snapshot.Metadata.Index)
			}
		}
	}
	newSrv := srv // capture srv: the named return is set to nil on error paths, but the defer below still needs the pointer
	defer func() {
		// closing backend without first closing kv can cause
		// resumed compactions to fail with closed tx errors
		if err != nil {
			newSrv.kv.Close()
		}
	}()
	srv.consistIndex.setConsistentIndex(srv.kv.ConsistentIndex())
	tp, err := auth.NewTokenProvider(cfg.Logger, cfg.AuthToken,
		func(index uint64) <-chan struct{} {
			return srv.applyWait.Wait(index)
		},
	)
	if err != nil {
		if cfg.Logger != nil {
			cfg.Logger.Warn("failed to create token provider", zap.Error(err))
		} else {
			plog.Errorf("failed to create token provider: %s", err)
		}
		return nil, err
	}
	srv.authStore = auth.NewAuthStore(srv.getLogger(), srv.be, tp, int(cfg.BcryptCost))
	if num := cfg.AutoCompactionRetention; num != 0 {
		srv.compactor, err = v3compactor.New(cfg.Logger, cfg.AutoCompactionMode, num, srv.kv, srv)
		if err != nil {
			return nil, err
		}
		srv.compactor.Run()
	}

	srv.applyV3Base = srv.newApplierV3Backend()
	if err = srv.restoreAlarms(); err != nil {
		return nil, err
	}

	srv.lessor.SetCheckpointer(func(ctx context.Context, cp *pb.LeaseCheckpointRequest) {
		srv.raftRequestOnce(ctx, pb.InternalRaftRequest{LeaseCheckpoint: cp})
	})

	// TODO: move transport initialization near the definition of remote
	tr := &rafthttp.Transport{
		Logger:      cfg.Logger,
		TLSInfo:     cfg.PeerTLSInfo,
		DialTimeout: cfg.peerDialTimeout(),
		ID:          id,
		URLs:        cfg.PeerURLs,
		ClusterID:   cl.ID(),
		Raft:        srv,
		Snapshotter: ss,
		ServerStats: sstats,
		LeaderStats: lstats,
		ErrorC:      srv.errorc,
	}
	if err = tr.Start(); err != nil {
		return nil, err
	}
	// add all remotes into transport
	for _, m := range remotes {
		if m.ID != id {
			tr.AddRemote(m.ID, m.PeerURLs)
		}
	}
	for _, m := range cl.Members() {
		if m.ID != id {
			tr.AddPeer(m.ID, m.PeerURLs)
		}
	}
	srv.r.transport = tr

	return srv, nil
}
func (s *EtcdServer) getLogger() *zap.Logger {
	s.lgMu.RLock()
	l := s.lg
	s.lgMu.RUnlock()
	return l
}

func tickToDur(ticks int, tickMs uint) string {
	return fmt.Sprintf("%v", time.Duration(ticks)*time.Duration(tickMs)*time.Millisecond)
}
func (s *EtcdServer) adjustTicks() {
	lg := s.getLogger()
	clusterN := len(s.cluster.Members())

	// single-node fresh start, or a single node recovering from a snapshot
	if clusterN == 1 {
		ticks := s.Cfg.ElectionTicks - 1
		if lg != nil {
			lg.Info(
				"started as single-node; fast-forwarding election ticks",
				zap.String("local-member-id", s.ID().String()),
				zap.Int("forward-ticks", ticks),
				zap.String("forward-duration", tickToDur(ticks, s.Cfg.TickMs)),
				zap.Int("election-ticks", s.Cfg.ElectionTicks),
				zap.String("election-timeout", tickToDur(s.Cfg.ElectionTicks, s.Cfg.TickMs)),
			)
		} else {
			plog.Infof("%s as single-node; fast-forwarding %d ticks (election ticks %d)", s.ID(), ticks, s.Cfg.ElectionTicks)
		}
		s.r.advanceTicks(ticks)
		return
	}

	if !s.Cfg.InitialElectionTickAdvance {
		if lg != nil {
			lg.Info("skipping initial election tick advance", zap.Int("election-ticks", s.Cfg.ElectionTicks))
		}
		return
	}
	if lg != nil {
		lg.Info("starting initial election tick advance", zap.Int("election-ticks", s.Cfg.ElectionTicks))
	}

	// retry up to "rafthttp.ConnReadTimeout", which is 5 seconds,
	// until a peer connection reports; otherwise:
	// 1. all connections failed, or
	// 2. no active peers, or
	// 3. restarted single-node with no snapshot
	// then, do nothing, because advancing ticks would have no effect
	waitTime := rafthttp.ConnReadTimeout
	itv := 50 * time.Millisecond
	for i := int64(0); i < int64(waitTime/itv); i++ {
		select {
		case <-time.After(itv):
		case <-s.stopping:
			return
		}

		peerN := s.r.transport.ActivePeers()
		if peerN > 1 {
			// multi-node received peer connection reports;
			// adjust ticks in case leader messages are delivered slowly
			ticks := s.Cfg.ElectionTicks - 2
			if lg != nil {
				lg.Info(
					"initialized peer connections; fast-forwarding election ticks",
					zap.String("local-member-id", s.ID().String()),
					zap.Int("forward-ticks", ticks),
					zap.String("forward-duration", tickToDur(ticks, s.Cfg.TickMs)),
					zap.Int("election-ticks", s.Cfg.ElectionTicks),
					zap.String("election-timeout", tickToDur(s.Cfg.ElectionTicks, s.Cfg.TickMs)),
					zap.Int("active-remote-members", peerN),
				)
			} else {
				plog.Infof("%s initialized peer connection; fast-forwarding %d ticks (election ticks %d) with %d active peer(s)", s.ID(), ticks, s.Cfg.ElectionTicks, peerN)
			}

			s.r.advanceTicks(ticks)
			return
		}
	}
}
// Start performs any initialization of the Server necessary for it to
// begin serving requests. It must be called before Do or Process.
// Start must be non-blocking; any long-running server functionality
// should be implemented in goroutines.
func (s *EtcdServer) Start() {
	s.start()
	s.goAttach(func() { s.adjustTicks() })
	s.goAttach(func() { s.publish(s.Cfg.ReqTimeout()) })
	s.goAttach(s.purgeFile)
	s.goAttach(func() { monitorFileDescriptor(s.getLogger(), s.stopping) })
	s.goAttach(s.monitorVersions)
	s.goAttach(s.linearizableReadLoop)
	s.goAttach(s.monitorKVHash)
}
// start prepares and starts the server in a new goroutine. It is no longer
// safe to modify a server's fields after it has been sent to Start.
// This function is just used for testing.
func (s *EtcdServer) start() {
	lg := s.getLogger()

	if s.Cfg.SnapshotCount == 0 {
		if lg != nil {
			lg.Info(
				"updating snapshot-count to default",
				zap.Uint64("given-snapshot-count", s.Cfg.SnapshotCount),
				zap.Uint64("updated-snapshot-count", DefaultSnapshotCount),
			)
		} else {
			plog.Infof("set snapshot count to default %d", DefaultSnapshotCount)
		}
		s.Cfg.SnapshotCount = DefaultSnapshotCount
	}
	if s.Cfg.SnapshotCatchUpEntries == 0 {
		if lg != nil {
			lg.Info(
				"updating snapshot catch-up entries to default",
				zap.Uint64("given-snapshot-catchup-entries", s.Cfg.SnapshotCatchUpEntries),
				zap.Uint64("updated-snapshot-catchup-entries", DefaultSnapshotCatchUpEntries),
			)
		}
		s.Cfg.SnapshotCatchUpEntries = DefaultSnapshotCatchUpEntries
	}

	s.w = wait.New()
	s.applyWait = wait.NewTimeList()
	s.done = make(chan struct{})
	s.stop = make(chan struct{})
	s.stopping = make(chan struct{})
	s.ctx, s.cancel = context.WithCancel(context.Background())
	s.readwaitc = make(chan struct{}, 1)
	s.readNotifier = newNotifier()
	s.leaderChanged = make(chan struct{})
	if s.ClusterVersion() != nil {
		if lg != nil {
			lg.Info(
				"starting etcd server",
				zap.String("local-member-id", s.ID().String()),
				zap.String("local-server-version", version.Version),
				zap.String("cluster-id", s.Cluster().ID().String()),
				zap.String("cluster-version", version.Cluster(s.ClusterVersion().String())),
			)
		} else {
			plog.Infof("starting server... [version: %v, cluster version: %v]", version.Version, version.Cluster(s.ClusterVersion().String()))
		}
		membership.ClusterVersionMetrics.With(prometheus.Labels{"cluster_version": s.ClusterVersion().String()}).Set(1)
	} else {
		if lg != nil {
			lg.Info(
				"starting etcd server",
				zap.String("local-member-id", s.ID().String()),
				zap.String("local-server-version", version.Version),
				zap.String("cluster-version", "to_be_decided"),
			)
		} else {
			plog.Infof("starting server... [version: %v, cluster version: to_be_decided]", version.Version)
		}
	}

	// TODO: if this is an empty log, writes all peer infos
	// into the first entry
	go s.run()
}
func (s *EtcdServer) purgeFile() {
	var dberrc, serrc, werrc <-chan error
	if s.Cfg.MaxSnapFiles > 0 {
		dberrc = fileutil.PurgeFile(s.getLogger(), s.Cfg.SnapDir(), "snap.db", s.Cfg.MaxSnapFiles, purgeFileInterval, s.done)
		serrc = fileutil.PurgeFile(s.getLogger(), s.Cfg.SnapDir(), "snap", s.Cfg.MaxSnapFiles, purgeFileInterval, s.done)
	}
	if s.Cfg.MaxWALFiles > 0 {
		werrc = fileutil.PurgeFile(s.getLogger(), s.Cfg.WALDir(), "wal", s.Cfg.MaxWALFiles, purgeFileInterval, s.done)
	}

	lg := s.getLogger()
	select {
	case e := <-dberrc:
		if lg != nil {
			lg.Fatal("failed to purge snap db file", zap.Error(e))
		} else {
			plog.Fatalf("failed to purge snap db file %v", e)
		}
	case e := <-serrc:
		if lg != nil {
			lg.Fatal("failed to purge snap file", zap.Error(e))
		} else {
			plog.Fatalf("failed to purge snap file %v", e)
		}
	case e := <-werrc:
		if lg != nil {
			lg.Fatal("failed to purge wal file", zap.Error(e))
		} else {
			plog.Fatalf("failed to purge wal file %v", e)
		}
	case <-s.stopping:
		return
	}
}
func (s *EtcdServer) Cluster() api.Cluster { return s.cluster }

func (s *EtcdServer) ApplyWait() <-chan struct{} { return s.applyWait.Wait(s.getCommittedIndex()) }

type ServerPeer interface {
	ServerV2
	RaftHandler() http.Handler
	LeaseHandler() http.Handler
}

func (s *EtcdServer) LeaseHandler() http.Handler {
	if s.lessor == nil {
		return nil
	}
	return leasehttp.NewHandler(s.lessor, s.ApplyWait)
}

func (s *EtcdServer) RaftHandler() http.Handler { return s.r.transport.Handler() }

// Process takes a raft message and applies it to the server's raft state
// machine, respecting any timeout of the given context.
func (s *EtcdServer) Process(ctx context.Context, m raftpb.Message) error {
	if s.cluster.IsIDRemoved(types.ID(m.From)) {
		if lg := s.getLogger(); lg != nil {
			lg.Warn(
				"rejected Raft message from removed member",
				zap.String("local-member-id", s.ID().String()),
				zap.String("removed-member-id", types.ID(m.From).String()),
			)
		} else {
			plog.Warningf("reject message from removed member %s", types.ID(m.From).String())
		}
		return httptypes.NewHTTPError(http.StatusForbidden, "cannot process message from removed member")
	}
	if m.Type == raftpb.MsgApp {
		s.stats.RecvAppendReq(types.ID(m.From).String(), m.Size())
	}
	return s.r.Step(ctx, m)
}

func (s *EtcdServer) IsIDRemoved(id uint64) bool { return s.cluster.IsIDRemoved(types.ID(id)) }

func (s *EtcdServer) ReportUnreachable(id uint64) { s.r.ReportUnreachable(id) }

// ReportSnapshot reports snapshot sent status to the raft state machine,
// and clears the used snapshot from the snapshot store.
func (s *EtcdServer) ReportSnapshot(id uint64, status raft.SnapshotStatus) {
	s.r.ReportSnapshot(id, status)
}

type etcdProgress struct {
	confState raftpb.ConfState
	snapi     uint64
	appliedt  uint64
	appliedi  uint64
}

// raftReadyHandler contains a set of EtcdServer operations to be called by raftNode,
// and helps decouple state machine logic from Raft algorithms.
// TODO: add a state machine interface to apply the commit entries and do snapshot/recover
type raftReadyHandler struct {
	getLead              func() (lead uint64)
	updateLead           func(lead uint64)
	updateLeadership     func(newLeader bool)
	updateCommittedIndex func(uint64)
}
func (s *EtcdServer) run() {
	lg := s.getLogger()

	sn, err := s.r.raftStorage.Snapshot()
	if err != nil {
		if lg != nil {
			lg.Panic("failed to get snapshot from Raft storage", zap.Error(err))
		} else {
			plog.Panicf("get snapshot from raft storage error: %v", err)
		}
	}

	// asynchronously accept apply packets, dispatch progress in-order
	sched := schedule.NewFIFOScheduler()

	var (
		smu   sync.RWMutex
		syncC <-chan time.Time
	)
	setSyncC := func(ch <-chan time.Time) {
		smu.Lock()
		syncC = ch
		smu.Unlock()
	}
	getSyncC := func() (ch <-chan time.Time) {
		smu.RLock()
		ch = syncC
		smu.RUnlock()
		return
	}
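	// NOTE (editorial): syncC is non-nil only while this member is the leader
	// (see updateLeadership below). The run loop's "<-getSyncC()" case uses it
	// to periodically sync v2 TTL keys; followers set it to nil so that case
	// never fires.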
	rh := &raftReadyHandler{
		getLead:    func() (lead uint64) { return s.getLead() },
		updateLead: func(lead uint64) { s.setLead(lead) },
		updateLeadership: func(newLeader bool) {
			if !s.isLeader() {
				if s.lessor != nil {
					s.lessor.Demote()
				}
				if s.compactor != nil {
					s.compactor.Pause()
				}
				setSyncC(nil)
			} else {
				if newLeader {
					t := time.Now()
					s.leadTimeMu.Lock()
					s.leadElectedTime = t
					s.leadTimeMu.Unlock()
				}
				setSyncC(s.SyncTicker.C)
				if s.compactor != nil {
					s.compactor.Resume()
				}
			}
			if newLeader {
				s.leaderChangedMu.Lock()
				lc := s.leaderChanged
				s.leaderChanged = make(chan struct{})
				close(lc)
				s.leaderChangedMu.Unlock()
			}
			// TODO: remove the nil check; the current test utility
			// does not provide the stats
			if s.stats != nil {
				s.stats.BecomeLeader()
			}
		},
		updateCommittedIndex: func(ci uint64) {
			cci := s.getCommittedIndex()
			if ci > cci {
				s.setCommittedIndex(ci)
			}
		},
	}
	s.r.start(rh)

	ep := etcdProgress{
		confState: sn.Metadata.ConfState,
		snapi:     sn.Metadata.Index,
		appliedt:  sn.Metadata.Term,
		appliedi:  sn.Metadata.Index,
	}

	defer func() {
		s.wgMu.Lock() // block concurrent waitgroup adds in goAttach while stopping
		close(s.stopping)
		s.wgMu.Unlock()
		s.cancel()

		sched.Stop()

		// wait for goroutines before closing raft so the WAL stays open
		s.wg.Wait()

		s.SyncTicker.Stop()

		// must stop raft after scheduler; etcdserver can leak rafthttp pipelines
		// by adding a peer after raft stops the transport
		s.r.stop()

		// kv, lessor and backend can be nil if running without v3 enabled
		// or running unit tests.
		if s.lessor != nil {
			s.lessor.Stop()
		}
		if s.kv != nil {
			s.kv.Close()
		}
		if s.authStore != nil {
			s.authStore.Close()
		}
		if s.be != nil {
			s.be.Close()
		}
		if s.compactor != nil {
			s.compactor.Stop()
		}
		close(s.done)
	}()

	var expiredLeaseC <-chan []*lease.Lease
	if s.lessor != nil {
		expiredLeaseC = s.lessor.ExpiredLeasesC()
	}

	for {
		select {
		case ap := <-s.r.apply():
			f := func(context.Context) { s.applyAll(&ep, &ap) }
			sched.Schedule(f)
		case leases := <-expiredLeaseC:
			s.goAttach(func() {
				// increase throughput of the expired-lease deletion process through parallelization
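				// NOTE (editorial): the buffered channel below acts as a
				// semaphore, capping concurrent revocations at maxPendingRevokes.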
				c := make(chan struct{}, maxPendingRevokes)
				for _, lease := range leases {
					select {
					case c <- struct{}{}:
					case <-s.stopping:
						return
					}
					lid := lease.ID
					s.goAttach(func() {
						ctx := s.authStore.WithRoot(s.ctx)
						_, lerr := s.LeaseRevoke(ctx, &pb.LeaseRevokeRequest{ID: int64(lid)})
						if lerr == nil {
							leaseExpired.Inc()
						} else {
							if lg != nil {
								lg.Warn(
									"failed to revoke lease",
									zap.String("lease-id", fmt.Sprintf("%016x", lid)),
									zap.Error(lerr),
								)
							} else {
								plog.Warningf("failed to revoke %016x (%q)", lid, lerr.Error())
							}
						}
						<-c
					})
				}
			})
		case err := <-s.errorc:
			if lg != nil {
				lg.Warn("server error", zap.Error(err))
				lg.Warn("data-dir used by this member must be removed")
			} else {
				plog.Errorf("%s", err)
				plog.Infof("the data-dir used by this member must be removed.")
			}
			return
		case <-getSyncC():
			if s.v2store.HasTTLKeys() {
				s.sync(s.Cfg.ReqTimeout())
			}
		case <-s.stop:
			return
		}
	}
}
func (s *EtcdServer) applyAll(ep *etcdProgress, apply *apply) {
	s.applySnapshot(ep, apply)
	s.applyEntries(ep, apply)

	proposalsApplied.Set(float64(ep.appliedi))
	s.applyWait.Trigger(ep.appliedi)

	// wait for the raft routine to finish the disk writes before triggering a
	// snapshot; otherwise the applied index might be greater than the last index
	// in raft storage, since the raft routine might be slower than the apply routine.
	<-apply.notifyc

	s.triggerSnapshot(ep)
	select {
	// snapshot requested via send()
	case m := <-s.r.msgSnapC:
		merged := s.createMergedSnapshotMessage(m, ep.appliedt, ep.appliedi, ep.confState)
		s.sendMergedSnap(merged)
	default:
	}
}
func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) {
	if raft.IsEmptySnap(apply.snapshot) {
		return
	}

	lg := s.getLogger()
	if lg != nil {
		lg.Info(
			"applying snapshot",
			zap.Uint64("current-snapshot-index", ep.snapi),
			zap.Uint64("current-applied-index", ep.appliedi),
			zap.Uint64("incoming-leader-snapshot-index", apply.snapshot.Metadata.Index),
			zap.Uint64("incoming-leader-snapshot-term", apply.snapshot.Metadata.Term),
		)
	} else {
		plog.Infof("applying snapshot at index %d...", ep.snapi)
	}
	defer func() {
		if lg != nil {
			lg.Info(
				"applied snapshot",
				zap.Uint64("current-snapshot-index", ep.snapi),
				zap.Uint64("current-applied-index", ep.appliedi),
				zap.Uint64("incoming-leader-snapshot-index", apply.snapshot.Metadata.Index),
				zap.Uint64("incoming-leader-snapshot-term", apply.snapshot.Metadata.Term),
			)
		} else {
			plog.Infof("finished applying incoming snapshot at index %d", ep.snapi)
		}
	}()

	if apply.snapshot.Metadata.Index <= ep.appliedi {
		if lg != nil {
			lg.Panic(
				"unexpected leader snapshot from outdated index",
				zap.Uint64("current-snapshot-index", ep.snapi),
				zap.Uint64("current-applied-index", ep.appliedi),
				zap.Uint64("incoming-leader-snapshot-index", apply.snapshot.Metadata.Index),
				zap.Uint64("incoming-leader-snapshot-term", apply.snapshot.Metadata.Term),
			)
		} else {
			plog.Panicf("snapshot index [%d] should > appliedi[%d] + 1",
				apply.snapshot.Metadata.Index, ep.appliedi)
		}
	}

	// wait for raftNode to persist snapshot onto the disk
	<-apply.notifyc

	newbe, err := openSnapshotBackend(s.Cfg, s.snapshotter, apply.snapshot)
	if err != nil {
		if lg != nil {
			lg.Panic("failed to open snapshot backend", zap.Error(err))
		} else {
			plog.Panic(err)
		}
	}

	// always recover lessor before kv. When we recover the mvcc.KV it will reattach keys to its leases.
	// If we recover mvcc.KV first, it will attach the keys to the wrong lessor before it recovers.
	if s.lessor != nil {
		if lg != nil {
			lg.Info("restoring lease store")
		} else {
			plog.Info("recovering lessor...")
		}

		s.lessor.Recover(newbe, func() lease.TxnDelete { return s.kv.Write() })

		if lg != nil {
			lg.Info("restored lease store")
		} else {
			plog.Info("finished recovering lessor")
		}
	}

	if lg != nil {
		lg.Info("restoring mvcc store")
	} else {
		plog.Info("restoring mvcc store...")
	}

	if err := s.kv.Restore(newbe); err != nil {
		if lg != nil {
			lg.Panic("failed to restore mvcc store", zap.Error(err))
		} else {
			plog.Panicf("restore KV error: %v", err)
		}
	}
	s.consistIndex.setConsistentIndex(s.kv.ConsistentIndex())

	if lg != nil {
		lg.Info("restored mvcc store")
	} else {
		plog.Info("finished restoring mvcc store")
	}

	// Closing old backend might block until all the txns
	// on the backend are finished.
	// We do not want to wait on closing the old backend.
	s.bemu.Lock()
	oldbe := s.be
	go func() {
		if lg != nil {
			lg.Info("closing old backend file")
		} else {
			plog.Info("closing old backend...")
		}
		defer func() {
			if lg != nil {
				lg.Info("closed old backend file")
			} else {
				plog.Info("finished closing old backend")
			}
		}()
		if err := oldbe.Close(); err != nil {
			if lg != nil {
				lg.Panic("failed to close old backend", zap.Error(err))
			} else {
				plog.Panicf("close backend error: %v", err)
			}
		}
	}()

	s.be = newbe
	s.bemu.Unlock()

	if lg != nil {
		lg.Info("restoring alarm store")
	} else {
		plog.Info("recovering alarms...")
	}
	if err := s.restoreAlarms(); err != nil {
		if lg != nil {
			lg.Panic("failed to restore alarm store", zap.Error(err))
		} else {
			plog.Panicf("restore alarms error: %v", err)
		}
	}
	if lg != nil {
		lg.Info("restored alarm store")
	} else {
		plog.Info("finished recovering alarms")
	}

	if s.authStore != nil {
		if lg != nil {
			lg.Info("restoring auth store")
		} else {
			plog.Info("recovering auth store...")
		}

		s.authStore.Recover(newbe)

		if lg != nil {
			lg.Info("restored auth store")
		} else {
			plog.Info("finished recovering auth store")
		}
	}

	if lg != nil {
		lg.Info("restoring v2 store")
	} else {
		plog.Info("recovering store v2...")
	}
	if err := s.v2store.Recovery(apply.snapshot.Data); err != nil {
		if lg != nil {
			lg.Panic("failed to restore v2 store", zap.Error(err))
		} else {
			plog.Panicf("recovery store error: %v", err)
		}
	}

	if lg != nil {
		lg.Info("restored v2 store")
	} else {
		plog.Info("finished recovering store v2")
	}

	s.cluster.SetBackend(s.be)

	if lg != nil {
		lg.Info("restoring cluster configuration")
	} else {
		plog.Info("recovering cluster configuration...")
	}

	s.cluster.Recover(api.UpdateCapability)

	if lg != nil {
		lg.Info("restored cluster configuration")
		lg.Info("removing old peers from network")
	} else {
		plog.Info("finished recovering cluster configuration")
		plog.Info("removing old peers from network...")
	}

	// recover raft transport
	s.r.transport.RemoveAllPeers()

	if lg != nil {
		lg.Info("removed old peers from network")
		lg.Info("adding peers from new cluster configuration")
	} else {
		plog.Info("finished removing old peers from network")
		plog.Info("adding peers from new cluster configuration into network...")
	}

	for _, m := range s.cluster.Members() {
		if m.ID == s.ID() {
			continue
		}
		s.r.transport.AddPeer(m.ID, m.PeerURLs)
	}

	if lg != nil {
		lg.Info("added peers from new cluster configuration")
	} else {
		plog.Info("finished adding peers from new cluster configuration into network...")
	}

	ep.appliedt = apply.snapshot.Metadata.Term
	ep.appliedi = apply.snapshot.Metadata.Index
	ep.snapi = ep.appliedi
	ep.confState = apply.snapshot.Metadata.ConfState
}
func (s *EtcdServer) applyEntries(ep *etcdProgress, apply *apply) {
	if len(apply.entries) == 0 {
		return
	}
	firsti := apply.entries[0].Index
	if firsti > ep.appliedi+1 {
		if lg := s.getLogger(); lg != nil {
			lg.Panic(
				"unexpected committed entry index",
				zap.Uint64("current-applied-index", ep.appliedi),
				zap.Uint64("first-committed-entry-index", firsti),
			)
		} else {
			plog.Panicf("first index of committed entry[%d] should <= appliedi[%d] + 1", firsti, ep.appliedi)
		}
	}
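	// NOTE (editorial): entries at or below ep.appliedi have already been
	// applied; raft may redeliver them (e.g. after a restart), so the slicing
	// below skips those duplicates.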
	var ents []raftpb.Entry
	if ep.appliedi+1-firsti < uint64(len(apply.entries)) {
		ents = apply.entries[ep.appliedi+1-firsti:]
	}
	if len(ents) == 0 {
		return
	}
	var shouldstop bool
	if ep.appliedt, ep.appliedi, shouldstop = s.apply(ents, &ep.confState); shouldstop {
		go s.stopWithDelay(10*100*time.Millisecond, fmt.Errorf("the member has been permanently removed from the cluster"))
	}
}
  1214. func (s *EtcdServer) triggerSnapshot(ep *etcdProgress) {
  1215. if ep.appliedi-ep.snapi <= s.Cfg.SnapshotCount {
  1216. return
  1217. }
  1218. if lg := s.getLogger(); lg != nil {
  1219. lg.Info(
  1220. "triggering snapshot",
  1221. zap.String("local-member-id", s.ID().String()),
  1222. zap.Uint64("local-member-applied-index", ep.appliedi),
  1223. zap.Uint64("local-member-snapshot-index", ep.snapi),
  1224. zap.Uint64("local-member-snapshot-count", s.Cfg.SnapshotCount),
  1225. )
  1226. } else {
  1227. plog.Infof("start to snapshot (applied: %d, lastsnap: %d)", ep.appliedi, ep.snapi)
  1228. }
  1229. s.snapshot(ep.appliedi, ep.confState)
  1230. ep.snapi = ep.appliedi
  1231. }
func (s *EtcdServer) hasMultipleVotingMembers() bool {
	return s.cluster != nil && len(s.cluster.VotingMemberIDs()) > 1
}

func (s *EtcdServer) isLeader() bool {
	return uint64(s.ID()) == s.Lead()
}

// MoveLeader transfers the leader to the given transferee.
func (s *EtcdServer) MoveLeader(ctx context.Context, lead, transferee uint64) error {
	if !s.cluster.IsMemberExist(types.ID(transferee)) || s.cluster.Member(types.ID(transferee)).IsLearner {
		return ErrBadLeaderTransferee
	}

	now := time.Now()
	interval := time.Duration(s.Cfg.TickMs) * time.Millisecond

	if lg := s.getLogger(); lg != nil {
		lg.Info(
			"leadership transfer starting",
			zap.String("local-member-id", s.ID().String()),
			zap.String("current-leader-member-id", types.ID(lead).String()),
			zap.String("transferee-member-id", types.ID(transferee).String()),
		)
	} else {
		plog.Infof("%s starts leadership transfer from %s to %s", s.ID(), types.ID(lead), types.ID(transferee))
	}

	s.r.TransferLeadership(ctx, lead, transferee)
	for s.Lead() != transferee {
		select {
		case <-ctx.Done(): // time out
			return ErrTimeoutLeaderTransfer
		case <-time.After(interval):
		}
	}

	// TODO: drain all requests, or drop all messages to the old leader
	if lg := s.getLogger(); lg != nil {
		lg.Info(
			"leadership transfer finished",
			zap.String("local-member-id", s.ID().String()),
			zap.String("old-leader-member-id", types.ID(lead).String()),
			zap.String("new-leader-member-id", types.ID(transferee).String()),
			zap.Duration("took", time.Since(now)),
		)
	} else {
		plog.Infof("%s finished leadership transfer from %s to %s (took %v)", s.ID(), types.ID(lead), types.ID(transferee), time.Since(now))
	}
	return nil
}
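// A hedged sketch of calling MoveLeader before planned maintenance; the
// helper name and the five-second timeout are illustrative assumptions, not
// part of the server API. MoveLeader polls until the transferee leads or the
// context expires, so the context bounds the whole transfer.
func exampleMoveLeader(s *EtcdServer, transferee uint64) error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	return s.MoveLeader(ctx, s.Lead(), transferee)
}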
// TransferLeadership transfers the leader to the chosen transferee.
func (s *EtcdServer) TransferLeadership() error {
	if !s.isLeader() {
		if lg := s.getLogger(); lg != nil {
			lg.Info(
				"skipped leadership transfer; local server is not leader",
				zap.String("local-member-id", s.ID().String()),
				zap.String("current-leader-member-id", types.ID(s.Lead()).String()),
			)
		} else {
			plog.Printf("skipped leadership transfer for stopping non-leader member")
		}
		return nil
	}

	if !s.hasMultipleVotingMembers() {
		if lg := s.getLogger(); lg != nil {
			lg.Info(
				"skipped leadership transfer for single voting member cluster",
				zap.String("local-member-id", s.ID().String()),
				zap.String("current-leader-member-id", types.ID(s.Lead()).String()),
			)
		} else {
			plog.Printf("skipped leadership transfer for single voting member cluster")
		}
		return nil
	}

	transferee, ok := longestConnected(s.r.transport, s.cluster.VotingMemberIDs())
	if !ok {
		return ErrUnhealthy
	}

	tm := s.Cfg.ReqTimeout()
	ctx, cancel := context.WithTimeout(s.ctx, tm)
	err := s.MoveLeader(ctx, s.Lead(), uint64(transferee))
	cancel()
	return err
}

// HardStop stops the server without coordination with other members in the cluster.
func (s *EtcdServer) HardStop() {
	select {
	case s.stop <- struct{}{}:
	case <-s.done:
		return
	}
	<-s.done
}

// Stop stops the server gracefully, and shuts down the running goroutine.
// Stop should be called after Start(s), otherwise it will block forever.
// When stopping the leader, Stop transfers its leadership to one of its peers
// before stopping the server.
// Stop terminates the Server and performs any necessary finalization.
// Do and Process cannot be called after Stop has been invoked.
func (s *EtcdServer) Stop() {
	if err := s.TransferLeadership(); err != nil {
		if lg := s.getLogger(); lg != nil {
			lg.Warn("leadership transfer failed", zap.String("local-member-id", s.ID().String()), zap.Error(err))
		} else {
			plog.Warningf("%s failed to transfer leadership (%v)", s.ID(), err)
		}
	}
	s.HardStop()
}

// ReadyNotify returns a channel that will be closed when the server
// is ready to serve client requests.
func (s *EtcdServer) ReadyNotify() <-chan struct{} { return s.readych }

func (s *EtcdServer) stopWithDelay(d time.Duration, err error) {
	select {
	case <-time.After(d):
	case <-s.done:
	}
	select {
	case s.errorc <- err:
	default:
	}
}

// StopNotify returns a channel that receives an empty struct
// when the server is stopped.
func (s *EtcdServer) StopNotify() <-chan struct{} { return s.done }
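// ReadyNotify, Stop, and StopNotify together support a simple lifecycle
// pattern. A hypothetical supervisor sketch; the helper name and the
// one-minute readiness budget are illustrative assumptions:
func exampleLifecycle(s *EtcdServer) {
	// Wait for the server to finish publishing itself before serving.
	select {
	case <-s.ReadyNotify():
	case <-time.After(time.Minute):
		// did not become ready in time; shut down instead
	}
	// Stop blocks until the server goroutine has exited; StopNotify lets
	// other goroutines observe the same event.
	s.Stop()
	<-s.StopNotify()
}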
func (s *EtcdServer) SelfStats() []byte { return s.stats.JSON() }

func (s *EtcdServer) LeaderStats() []byte {
	lead := s.getLead()
	if lead != uint64(s.id) {
		return nil
	}
	return s.lstats.JSON()
}

func (s *EtcdServer) StoreStats() []byte { return s.v2store.JsonStats() }

func (s *EtcdServer) checkMembershipOperationPermission(ctx context.Context) error {
	if s.authStore == nil {
		// In the context of an ordinary etcd process, s.authStore will never be nil.
		// This branch is for handling cases in server_test.go
		return nil
	}

	// Note that this permission check is done in the API layer,
	// so a TOCTOU problem could potentially arise with a schedule like this:
	// update membership with user A -> revoke root role of A -> apply membership change
	// in the state machine layer.
	// However, both membership changes and role management require the root privilege,
	// so careful operation by admins can prevent the problem.
	authInfo, err := s.AuthInfoFromCtx(ctx)
	if err != nil {
		return err
	}

	return s.AuthStore().IsAdminPermitted(authInfo)
}

func (s *EtcdServer) AddMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) {
	if err := s.checkMembershipOperationPermission(ctx); err != nil {
		return nil, err
	}

	// TODO: move Member to protobuf type
	b, err := json.Marshal(memb)
	if err != nil {
		return nil, err
	}

	// by default StrictReconfigCheck is enabled; reject new members if unhealthy.
	if err := s.mayAddMember(memb); err != nil {
		return nil, err
	}

	cc := raftpb.ConfChange{
		Type:    raftpb.ConfChangeAddNode,
		NodeID:  uint64(memb.ID),
		Context: b,
	}

	if memb.IsLearner {
		cc.Type = raftpb.ConfChangeAddLearnerNode
	}

	return s.configure(ctx, cc)
}
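// A hedged sketch of adding a learner through this API. The member ID and
// peer URL are invented for illustration; real callers normally derive the
// ID from the peer URLs rather than hard-coding it, and should pass a
// context carrying auth information.
func exampleAddLearner(ctx context.Context, s *EtcdServer) error {
	m := membership.Member{
		ID: types.ID(0xBEEF),
		RaftAttributes: membership.RaftAttributes{
			PeerURLs:  []string{"http://10.0.0.4:2380"},
			IsLearner: true,
		},
	}
	_, err := s.AddMember(ctx, m)
	return err
}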
func (s *EtcdServer) mayAddMember(memb membership.Member) error {
	if !s.Cfg.StrictReconfigCheck {
		return nil
	}

	// protect quorum when adding voting member
	if !memb.IsLearner && !s.cluster.IsReadyToAddVotingMember() {
		if lg := s.getLogger(); lg != nil {
			lg.Warn(
				"rejecting member add request; not enough healthy members",
				zap.String("local-member-id", s.ID().String()),
				zap.String("requested-member-add", fmt.Sprintf("%+v", memb)),
				zap.Error(ErrNotEnoughStartedMembers),
			)
		} else {
			plog.Warningf("not enough started members, rejecting member add %+v", memb)
		}
		return ErrNotEnoughStartedMembers
	}

	if !isConnectedFullySince(s.r.transport, time.Now().Add(-HealthInterval), s.ID(), s.cluster.VotingMembers()) {
		if lg := s.getLogger(); lg != nil {
			lg.Warn(
				"rejecting member add request; local member has not been connected to all peers, reconfigure breaks active quorum",
				zap.String("local-member-id", s.ID().String()),
				zap.String("requested-member-add", fmt.Sprintf("%+v", memb)),
				zap.Error(ErrUnhealthy),
			)
		} else {
			plog.Warningf("not healthy for reconfigure, rejecting member add %+v", memb)
		}
		return ErrUnhealthy
	}

	return nil
}
func (s *EtcdServer) RemoveMember(ctx context.Context, id uint64) ([]*membership.Member, error) {
	if err := s.checkMembershipOperationPermission(ctx); err != nil {
		return nil, err
	}

	// by default StrictReconfigCheck is enabled; reject removal if it leads to quorum loss
	if err := s.mayRemoveMember(types.ID(id)); err != nil {
		return nil, err
	}

	cc := raftpb.ConfChange{
		Type:   raftpb.ConfChangeRemoveNode,
		NodeID: id,
	}
	return s.configure(ctx, cc)
}

// PromoteMember promotes a learner node to a voting node.
func (s *EtcdServer) PromoteMember(ctx context.Context, id uint64) ([]*membership.Member, error) {
	// Only the raft leader has information on whether the to-be-promoted learner
	// node is ready. If the promoteMember call fails with ErrNotLeader, forward
	// the request to the leader node via HTTP. If it fails with an error other
	// than ErrNotLeader, return the error.
	resp, err := s.promoteMember(ctx, id)
	if err == nil {
		learnerPromoteSucceed.Inc()
		return resp, nil
	}
	if err != ErrNotLeader {
		learnerPromoteFailed.WithLabelValues(err.Error()).Inc()
		return resp, err
	}

	cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout())
	defer cancel()

	// forward to leader
	for cctx.Err() == nil {
		leader, err := s.waitLeader(cctx)
		if err != nil {
			return nil, err
		}
		for _, url := range leader.PeerURLs {
			resp, err := promoteMemberHTTP(cctx, url, id, s.peerRt)
			if err == nil {
				return resp, nil
			}
			// If member promotion failed, return early. Otherwise keep retrying.
			if err == ErrLearnerNotReady || err == membership.ErrIDNotFound || err == membership.ErrMemberNotLearner {
				return nil, err
			}
		}
	}

	if cctx.Err() == context.DeadlineExceeded {
		return nil, ErrTimeout
	}
	return nil, ErrCanceled
}
// promoteMember checks whether the to-be-promoted learner node is ready before sending the promote
// request to raft.
// The function returns ErrNotLeader if the local node is not the raft leader (and therefore does not
// have enough information to determine whether the learner node is ready), and ErrLearnerNotReady if
// the local node is the leader (and therefore has enough information) but decided the learner node is
// not ready to be promoted.
func (s *EtcdServer) promoteMember(ctx context.Context, id uint64) ([]*membership.Member, error) {
	if err := s.checkMembershipOperationPermission(ctx); err != nil {
		return nil, err
	}

	// check if we can promote this learner.
	if err := s.mayPromoteMember(types.ID(id)); err != nil {
		return nil, err
	}

	// build the context for the promote confChange. mark IsLearner to false and IsPromote to true.
	promoteChangeContext := membership.ConfigChangeContext{
		Member: membership.Member{
			ID: types.ID(id),
		},
		IsPromote: true,
	}

	b, err := json.Marshal(promoteChangeContext)
	if err != nil {
		return nil, err
	}

	cc := raftpb.ConfChange{
		Type:    raftpb.ConfChangeAddNode,
		NodeID:  id,
		Context: b,
	}

	return s.configure(ctx, cc)
}

func (s *EtcdServer) mayPromoteMember(id types.ID) error {
	err := s.isLearnerReady(uint64(id))
	if err != nil {
		return err
	}

	if !s.Cfg.StrictReconfigCheck {
		return nil
	}
	if !s.cluster.IsReadyToPromoteMember(uint64(id)) {
		if lg := s.getLogger(); lg != nil {
			lg.Warn(
				"rejecting member promote request; not enough healthy members",
				zap.String("local-member-id", s.ID().String()),
				zap.String("requested-member-promote-id", id.String()),
				zap.Error(ErrNotEnoughStartedMembers),
			)
		} else {
			plog.Warningf("not enough started members, rejecting promote member %s", id)
		}
		return ErrNotEnoughStartedMembers
	}

	return nil
}
// isLearnerReady checks whether the learner has caught up with the leader.
// Note: it returns nil if the member is not found in the cluster or if the
// member is not a learner. These two conditions are checked later, before
// the apply phase.
func (s *EtcdServer) isLearnerReady(id uint64) error {
	rs := s.raftStatus()

	// leader's raftStatus.Progress is not nil
	if rs.Progress == nil {
		return ErrNotLeader
	}

	var learnerMatch uint64
	isFound := false
	leaderID := rs.ID
	for memberID, progress := range rs.Progress {
		if id == memberID {
			// check its status
			learnerMatch = progress.Match
			isFound = true
			break
		}
	}

	if isFound {
		leaderMatch := rs.Progress[leaderID].Match
		// the learner's Match has not caught up with the leader yet
		if float64(learnerMatch) < float64(leaderMatch)*readyPercent {
			return ErrLearnerNotReady
		}
	}

	return nil
}
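// The readiness rule compares raft Match indexes against a fraction of the
// leader's. Assuming readyPercent is 0.9, a learner at Match 92,000 passes
// when the leader is at 100,000, while one at 85,000 does not. A standalone
// restatement of the predicate, illustrative only:
func learnerCaughtUp(learnerMatch, leaderMatch uint64, percent float64) bool {
	return float64(learnerMatch) >= float64(leaderMatch)*percent
}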
func (s *EtcdServer) mayRemoveMember(id types.ID) error {
	if !s.Cfg.StrictReconfigCheck {
		return nil
	}

	isLearner := s.cluster.IsMemberExist(id) && s.cluster.Member(id).IsLearner
	// no need to check quorum when removing non-voting member
	if isLearner {
		return nil
	}

	if !s.cluster.IsReadyToRemoveVotingMember(uint64(id)) {
		if lg := s.getLogger(); lg != nil {
			lg.Warn(
				"rejecting member remove request; not enough healthy members",
				zap.String("local-member-id", s.ID().String()),
				zap.String("requested-member-remove-id", id.String()),
				zap.Error(ErrNotEnoughStartedMembers),
			)
		} else {
			plog.Warningf("not enough started members, rejecting remove member %s", id)
		}
		return ErrNotEnoughStartedMembers
	}

	// downed member is safe to remove since it's not part of the active quorum
	if t := s.r.transport.ActiveSince(id); id != s.ID() && t.IsZero() {
		return nil
	}

	// protect quorum if some members are down
	m := s.cluster.VotingMembers()
	active := numConnectedSince(s.r.transport, time.Now().Add(-HealthInterval), s.ID(), m)
	if (active - 1) < 1+((len(m)-1)/2) {
		if lg := s.getLogger(); lg != nil {
			lg.Warn(
				"rejecting member remove request; local member has not been connected to all peers, reconfigure breaks active quorum",
				zap.String("local-member-id", s.ID().String()),
				zap.String("requested-member-remove", id.String()),
				zap.Int("active-peers", active),
				zap.Error(ErrUnhealthy),
			)
		} else {
			plog.Warningf("reconfigure breaks active quorum, rejecting remove member %s", id)
		}
		return ErrUnhealthy
	}

	return nil
}
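// The quorum guard reads: after removing one live voting member, the
// remaining active peers must still meet the quorum threshold 1+((n-1)/2)
// computed over the current n voting members. For a 5-member cluster the
// threshold is 3, so a removal is allowed with 4 active peers (4-1 = 3 >= 3)
// but rejected with 3 (3-1 = 2 < 3). A standalone restatement, illustrative
// only:
func removalKeepsQuorum(active, votingMembers int) bool {
	return (active - 1) >= 1+((votingMembers-1)/2)
}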
func (s *EtcdServer) UpdateMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) {
	b, merr := json.Marshal(memb)
	if merr != nil {
		return nil, merr
	}

	if err := s.checkMembershipOperationPermission(ctx); err != nil {
		return nil, err
	}
	cc := raftpb.ConfChange{
		Type:    raftpb.ConfChangeUpdateNode,
		NodeID:  uint64(memb.ID),
		Context: b,
	}
	return s.configure(ctx, cc)
}

func (s *EtcdServer) setCommittedIndex(v uint64) {
	atomic.StoreUint64(&s.committedIndex, v)
}

func (s *EtcdServer) getCommittedIndex() uint64 {
	return atomic.LoadUint64(&s.committedIndex)
}

func (s *EtcdServer) setAppliedIndex(v uint64) {
	atomic.StoreUint64(&s.appliedIndex, v)
}

func (s *EtcdServer) getAppliedIndex() uint64 {
	return atomic.LoadUint64(&s.appliedIndex)
}

func (s *EtcdServer) setTerm(v uint64) {
	atomic.StoreUint64(&s.term, v)
}

func (s *EtcdServer) getTerm() uint64 {
	return atomic.LoadUint64(&s.term)
}

func (s *EtcdServer) setLead(v uint64) {
	atomic.StoreUint64(&s.lead, v)
}

func (s *EtcdServer) getLead() uint64 {
	return atomic.LoadUint64(&s.lead)
}

func (s *EtcdServer) leaderChangedNotify() <-chan struct{} {
	s.leaderChangedMu.RLock()
	defer s.leaderChangedMu.RUnlock()
	return s.leaderChanged
}

// RaftStatusGetter represents etcd server and Raft progress.
type RaftStatusGetter interface {
	ID() types.ID
	Leader() types.ID
	CommittedIndex() uint64
	AppliedIndex() uint64
	Term() uint64
}

func (s *EtcdServer) ID() types.ID { return s.id }

func (s *EtcdServer) Leader() types.ID { return types.ID(s.getLead()) }

func (s *EtcdServer) Lead() uint64 { return s.getLead() }

func (s *EtcdServer) CommittedIndex() uint64 { return s.getCommittedIndex() }

func (s *EtcdServer) AppliedIndex() uint64 { return s.getAppliedIndex() }

func (s *EtcdServer) Term() uint64 { return s.getTerm() }

type confChangeResponse struct {
	membs []*membership.Member
	err   error
}

// configure sends a configuration change through consensus and
// then waits for it to be applied to the server. It
// will block until the change is performed or there is an error.
func (s *EtcdServer) configure(ctx context.Context, cc raftpb.ConfChange) ([]*membership.Member, error) {
	cc.ID = s.reqIDGen.Next()
	ch := s.w.Register(cc.ID)

	start := time.Now()
	if err := s.r.ProposeConfChange(ctx, cc); err != nil {
		s.w.Trigger(cc.ID, nil)
		return nil, err
	}

	select {
	case x := <-ch:
		if x == nil {
			if lg := s.getLogger(); lg != nil {
				lg.Panic("failed to configure")
			} else {
				plog.Panicf("configure trigger value should never be nil")
			}
		}
		resp := x.(*confChangeResponse)
		if lg := s.getLogger(); lg != nil {
			lg.Info(
				"applied a configuration change through raft",
				zap.String("local-member-id", s.ID().String()),
				zap.String("raft-conf-change", cc.Type.String()),
				zap.String("raft-conf-change-node-id", types.ID(cc.NodeID).String()),
			)
		}
		return resp.membs, resp.err

	case <-ctx.Done():
		s.w.Trigger(cc.ID, nil) // GC wait
		return nil, s.parseProposeCtxErr(ctx.Err(), start)

	case <-s.stopping:
		return nil, ErrStopped
	}
}
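// configure relies on the register/propose/wait pattern: register a channel
// under the proposal ID, propose through raft, and block until the apply
// path triggers that ID with the outcome. A minimal standalone sketch of
// the idea follows; miniWait is a hypothetical type invented here, and
// etcd's real pkg/wait is richer.
type miniWait struct {
	mu sync.Mutex
	ch map[uint64]chan interface{}
}

func newMiniWait() *miniWait {
	return &miniWait{ch: make(map[uint64]chan interface{})}
}

// Register creates a buffered result channel for the given ID.
func (w *miniWait) Register(id uint64) <-chan interface{} {
	w.mu.Lock()
	defer w.mu.Unlock()
	c := make(chan interface{}, 1)
	w.ch[id] = c
	return c
}

// Trigger delivers the result to the waiter, if any, and forgets the ID.
func (w *miniWait) Trigger(id uint64, x interface{}) {
	w.mu.Lock()
	defer w.mu.Unlock()
	if c, ok := w.ch[id]; ok {
		c <- x
		delete(w.ch, id)
	}
}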
// sync proposes a SYNC request and is non-blocking.
// This makes no guarantee that the request will be proposed or performed.
// The request will be canceled after the given timeout.
func (s *EtcdServer) sync(timeout time.Duration) {
	req := pb.Request{
		Method: "SYNC",
		ID:     s.reqIDGen.Next(),
		Time:   time.Now().UnixNano(),
	}
	data := pbutil.MustMarshal(&req)
	// There is no promise that the node has a leader when it makes a SYNC
	// request, so it proposes from a goroutine.
	ctx, cancel := context.WithTimeout(s.ctx, timeout)
	s.goAttach(func() {
		s.r.Propose(ctx, data)
		cancel()
	})
}
// publish registers server information into the cluster. The information
// is the JSON representation of this server's member struct, updated with the
// static clientURLs of the server.
// The function keeps attempting to register until it succeeds,
// or its server is stopped.
func (s *EtcdServer) publish(timeout time.Duration) {
	b, err := json.Marshal(s.attributes)
	if err != nil {
		if lg := s.getLogger(); lg != nil {
			lg.Panic("failed to marshal JSON", zap.Error(err))
		} else {
			plog.Panicf("json marshal error: %v", err)
		}
		return
	}
	req := pb.Request{
		Method: "PUT",
		Path:   membership.MemberAttributesStorePath(s.id),
		Val:    string(b),
	}

	for {
		ctx, cancel := context.WithTimeout(s.ctx, timeout)
		_, err := s.Do(ctx, req)
		cancel()
		switch err {
		case nil:
			close(s.readych)
			if lg := s.getLogger(); lg != nil {
				lg.Info(
					"published local member to cluster through raft",
					zap.String("local-member-id", s.ID().String()),
					zap.String("local-member-attributes", fmt.Sprintf("%+v", s.attributes)),
					zap.String("request-path", req.Path),
					zap.String("cluster-id", s.cluster.ID().String()),
					zap.Duration("publish-timeout", timeout),
				)
			} else {
				plog.Infof("published %+v to cluster %s", s.attributes, s.cluster.ID())
			}
			return

		case ErrStopped:
			if lg := s.getLogger(); lg != nil {
				lg.Warn(
					"stopped publish because server is stopped",
					zap.String("local-member-id", s.ID().String()),
					zap.String("local-member-attributes", fmt.Sprintf("%+v", s.attributes)),
					zap.Duration("publish-timeout", timeout),
					zap.Error(err),
				)
			} else {
				plog.Infof("aborting publish because server is stopped")
			}
			return

		default:
			if lg := s.getLogger(); lg != nil {
				lg.Warn(
					"failed to publish local member to cluster through raft",
					zap.String("local-member-id", s.ID().String()),
					zap.String("local-member-attributes", fmt.Sprintf("%+v", s.attributes)),
					zap.String("request-path", req.Path),
					zap.Duration("publish-timeout", timeout),
					zap.Error(err),
				)
			} else {
				plog.Errorf("publish error: %v", err)
			}
		}
	}
}
func (s *EtcdServer) sendMergedSnap(merged snap.Message) {
	atomic.AddInt64(&s.inflightSnapshots, 1)

	lg := s.getLogger()
	fields := []zap.Field{
		zap.String("from", s.ID().String()),
		zap.String("to", types.ID(merged.To).String()),
		zap.Int64("bytes", merged.TotalSize),
		zap.String("size", humanize.Bytes(uint64(merged.TotalSize))),
	}

	now := time.Now()
	s.r.transport.SendSnapshot(merged)
	if lg != nil {
		lg.Info("sending merged snapshot", fields...)
	}

	s.goAttach(func() {
		select {
		case ok := <-merged.CloseNotify():
			// delay releasing inflight snapshot for another 30 seconds to
			// block log compaction.
			// If the follower still fails to catch up, it is probably just too slow
			// to catch up. We cannot avoid the snapshot cycle anyway.
			if ok {
				select {
				case <-time.After(releaseDelayAfterSnapshot):
				case <-s.stopping:
				}
			}

			atomic.AddInt64(&s.inflightSnapshots, -1)

			if lg != nil {
				lg.Info("sent merged snapshot", append(fields, zap.Duration("took", time.Since(now)))...)
			}

		case <-s.stopping:
			if lg != nil {
				lg.Warn("canceled sending merged snapshot; server stopping", fields...)
			}
			return
		}
	})
}
// apply takes entries received from Raft (after they have been committed)
// and applies them to the current state of the EtcdServer.
// The given entries should not be empty.
func (s *EtcdServer) apply(
	es []raftpb.Entry,
	confState *raftpb.ConfState,
) (appliedt uint64, appliedi uint64, shouldStop bool) {
	for i := range es {
		e := es[i]
		switch e.Type {
		case raftpb.EntryNormal:
			s.applyEntryNormal(&e)
			s.setAppliedIndex(e.Index)
			s.setTerm(e.Term)

		case raftpb.EntryConfChange:
			// set the consistent index of the currently executing entry
			if e.Index > s.consistIndex.ConsistentIndex() {
				s.consistIndex.setConsistentIndex(e.Index)
			}
			var cc raftpb.ConfChange
			pbutil.MustUnmarshal(&cc, e.Data)
			removedSelf, err := s.applyConfChange(cc, confState)
			s.setAppliedIndex(e.Index)
			s.setTerm(e.Term)
			shouldStop = shouldStop || removedSelf
			s.w.Trigger(cc.ID, &confChangeResponse{s.cluster.Members(), err})

		default:
			if lg := s.getLogger(); lg != nil {
				lg.Panic(
					"unknown entry type; must be either EntryNormal or EntryConfChange",
					zap.String("type", e.Type.String()),
				)
			} else {
				plog.Panicf("entry type should be either EntryNormal or EntryConfChange")
			}
		}
		appliedi, appliedt = e.Index, e.Term
	}
	return appliedt, appliedi, shouldStop
}
// applyEntryNormal applies an EntryNormal type raftpb request to the EtcdServer
func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) {
	shouldApplyV3 := false
	if e.Index > s.consistIndex.ConsistentIndex() {
		// set the consistent index of the currently executing entry
		s.consistIndex.setConsistentIndex(e.Index)
		shouldApplyV3 = true
	}

	// The raft state machine may generate a noop entry on leader confirmation.
	// Skip it in advance to avoid potential bugs in the future.
	if len(e.Data) == 0 {
		select {
		case s.forceVersionC <- struct{}{}:
		default:
		}
		// promote lessor when the local member is leader and finished
		// applying all entries from the last term.
		if s.isLeader() {
			s.lessor.Promote(s.Cfg.electionTimeout())
		}
		return
	}

	var raftReq pb.InternalRaftRequest
	if !pbutil.MaybeUnmarshal(&raftReq, e.Data) { // backward compatible
		var r pb.Request
		rp := &r
		pbutil.MustUnmarshal(rp, e.Data)
		s.w.Trigger(r.ID, s.applyV2Request((*RequestV2)(rp)))
		return
	}
	if raftReq.V2 != nil {
		req := (*RequestV2)(raftReq.V2)
		s.w.Trigger(req.ID, s.applyV2Request(req))
		return
	}

	// do not re-apply applied entries.
	if !shouldApplyV3 {
		return
	}

	id := raftReq.ID
	if id == 0 {
		id = raftReq.Header.ID
	}

	var ar *applyResult
	needResult := s.w.IsRegistered(id)
	if needResult || !noSideEffect(&raftReq) {
		if !needResult && raftReq.Txn != nil {
			removeNeedlessRangeReqs(raftReq.Txn)
		}
		ar = s.applyV3.Apply(&raftReq)
	}

	if ar == nil {
		return
	}

	if ar.err != ErrNoSpace || len(s.alarmStore.Get(pb.AlarmType_NOSPACE)) > 0 {
		s.w.Trigger(id, ar)
		return
	}

	if lg := s.getLogger(); lg != nil {
		lg.Warn(
			"message exceeded backend quota; raising alarm",
			zap.Int64("quota-size-bytes", s.Cfg.QuotaBackendBytes),
			zap.String("quota-size", humanize.Bytes(uint64(s.Cfg.QuotaBackendBytes))),
			zap.Error(ar.err),
		)
	} else {
		plog.Errorf("applying raft message exceeded backend quota")
	}

	s.goAttach(func() {
		a := &pb.AlarmRequest{
			MemberID: uint64(s.ID()),
			Action:   pb.AlarmRequest_ACTIVATE,
			Alarm:    pb.AlarmType_NOSPACE,
		}
		s.raftRequest(s.ctx, pb.InternalRaftRequest{Alarm: a})
		s.w.Trigger(id, ar)
	})
}
// applyConfChange applies a ConfChange to the server. It is only
// invoked with a ConfChange that has already passed through Raft.
func (s *EtcdServer) applyConfChange(cc raftpb.ConfChange, confState *raftpb.ConfState) (bool, error) {
	if err := s.cluster.ValidateConfigurationChange(cc); err != nil {
		cc.NodeID = raft.None
		s.r.ApplyConfChange(cc)
		return false, err
	}

	lg := s.getLogger()
	*confState = *s.r.ApplyConfChange(cc)
	switch cc.Type {
	case raftpb.ConfChangeAddNode, raftpb.ConfChangeAddLearnerNode:
		confChangeContext := new(membership.ConfigChangeContext)
		if err := json.Unmarshal(cc.Context, confChangeContext); err != nil {
			if lg != nil {
				lg.Panic("failed to unmarshal member", zap.Error(err))
			} else {
				plog.Panicf("unmarshal member should never fail: %v", err)
			}
		}
		if cc.NodeID != uint64(confChangeContext.Member.ID) {
			if lg != nil {
				lg.Panic(
					"got different member ID",
					zap.String("member-id-from-config-change-entry", types.ID(cc.NodeID).String()),
					zap.String("member-id-from-message", confChangeContext.Member.ID.String()),
				)
			} else {
				plog.Panicf("nodeID should always be equal to member ID")
			}
		}
		if confChangeContext.IsPromote {
			s.cluster.PromoteMember(confChangeContext.Member.ID)
		} else {
			s.cluster.AddMember(&confChangeContext.Member)

			if confChangeContext.Member.ID != s.id {
				s.r.transport.AddPeer(confChangeContext.Member.ID, confChangeContext.PeerURLs)
			}
		}

		// update the isLearner metric when this server's id is equal to the id in the raft member confChange
		if confChangeContext.Member.ID == s.id {
			if cc.Type == raftpb.ConfChangeAddLearnerNode {
				isLearner.Set(1)
			} else {
				isLearner.Set(0)
			}
		}

	case raftpb.ConfChangeRemoveNode:
		id := types.ID(cc.NodeID)
		s.cluster.RemoveMember(id)
		if id == s.id {
			return true, nil
		}
		s.r.transport.RemovePeer(id)

	case raftpb.ConfChangeUpdateNode:
		m := new(membership.Member)
		if err := json.Unmarshal(cc.Context, m); err != nil {
			if lg != nil {
				lg.Panic("failed to unmarshal member", zap.Error(err))
			} else {
				plog.Panicf("unmarshal member should never fail: %v", err)
			}
		}
		if cc.NodeID != uint64(m.ID) {
			if lg != nil {
				lg.Panic(
					"got different member ID",
					zap.String("member-id-from-config-change-entry", types.ID(cc.NodeID).String()),
					zap.String("member-id-from-message", m.ID.String()),
				)
			} else {
				plog.Panicf("nodeID should always be equal to member ID")
			}
		}
		s.cluster.UpdateRaftAttributes(m.ID, m.RaftAttributes)
		if m.ID != s.id {
			s.r.transport.UpdatePeer(m.ID, m.PeerURLs)
		}
	}
	return false, nil
}
// TODO: non-blocking snapshot
func (s *EtcdServer) snapshot(snapi uint64, confState raftpb.ConfState) {
	clone := s.v2store.Clone()
	// commit kv to write metadata (for example: consistent index) to disk.
	// KV().commit() updates the consistent index in the backend.
	// All operations that update the consistent index must be called sequentially
	// from the applyAll function.
	// So KV().Commit() cannot run in parallel with apply. It has to be called outside
	// the goroutine created below.
	s.KV().Commit()

	s.goAttach(func() {
		lg := s.getLogger()

		d, err := clone.SaveNoCopy()
		// TODO: current store will never fail to do a snapshot
		// what should we do if the store might fail?
		if err != nil {
			if lg != nil {
				lg.Panic("failed to save v2 store", zap.Error(err))
			} else {
				plog.Panicf("store save should never fail: %v", err)
			}
		}
		snap, err := s.r.raftStorage.CreateSnapshot(snapi, &confState, d)
		if err != nil {
			// the snapshot was done asynchronously with the progress of raft.
			// raft might have already gotten a newer snapshot.
			if err == raft.ErrSnapOutOfDate {
				return
			}
			if lg != nil {
				lg.Panic("failed to create snapshot", zap.Error(err))
			} else {
				plog.Panicf("unexpected create snapshot error %v", err)
			}
		}
		// SaveSnap saves the snapshot and releases the locked wal files
		// to the snapshot index.
		if err = s.r.storage.SaveSnap(snap); err != nil {
			if lg != nil {
				lg.Panic("failed to save snapshot", zap.Error(err))
			} else {
				plog.Fatalf("save snapshot error: %v", err)
			}
		}
		if lg != nil {
			lg.Info(
				"saved snapshot",
				zap.Uint64("snapshot-index", snap.Metadata.Index),
			)
		} else {
			plog.Infof("saved snapshot at index %d", snap.Metadata.Index)
		}

		// When sending a snapshot, etcd will pause compaction.
		// After a follower receives a snapshot, it needs all the entries right
		// after the snapshot's index in order to catch up. If compaction were not
		// paused, those entries might already be compacted by the time the
		// snapshot has been sent and saved, which would immediately trigger
		// another snapshot-sending cycle.
		if atomic.LoadInt64(&s.inflightSnapshots) != 0 {
			if lg != nil {
				lg.Info("skip compaction since there is an inflight snapshot")
			} else {
				plog.Infof("skip compaction since there is an inflight snapshot")
			}
			return
		}

		// keep some in-memory log entries for slow followers.
		compacti := uint64(1)
		if snapi > s.Cfg.SnapshotCatchUpEntries {
			compacti = snapi - s.Cfg.SnapshotCatchUpEntries
		}

		err = s.r.raftStorage.Compact(compacti)
		if err != nil {
			// the compaction was done asynchronously with the progress of raft.
			// the raft log might already have been compacted.
			if err == raft.ErrCompacted {
				return
			}
			if lg != nil {
				lg.Panic("failed to compact", zap.Error(err))
			} else {
				plog.Panicf("unexpected compaction error %v", err)
			}
		}

		if lg != nil {
			lg.Info(
				"compacted Raft logs",
				zap.Uint64("compact-index", compacti),
			)
		} else {
			plog.Infof("compacted raft log at %d", compacti)
		}
	})
}
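// Compaction keeps SnapshotCatchUpEntries entries behind the snapshot index
// so that slightly slow followers can catch up from the in-memory raft log
// instead of forcing a full snapshot transfer. For example, with snapi set
// to 80,000 and SnapshotCatchUpEntries set to 5,000, the log is compacted
// up to index 75,000. A standalone restatement of the index math,
// illustrative only:
func exampleCompactIndex(snapi, catchUpEntries uint64) uint64 {
	if snapi > catchUpEntries {
		return snapi - catchUpEntries
	}
	return 1 // never compact the entire log away
}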
// CutPeer drops messages to the specified peer.
func (s *EtcdServer) CutPeer(id types.ID) {
	tr, ok := s.r.transport.(*rafthttp.Transport)
	if ok {
		tr.CutPeer(id)
	}
}

// MendPeer recovers the message dropping behavior of the given peer.
func (s *EtcdServer) MendPeer(id types.ID) {
	tr, ok := s.r.transport.(*rafthttp.Transport)
	if ok {
		tr.MendPeer(id)
	}
}

func (s *EtcdServer) PauseSending() { s.r.pauseSending() }

func (s *EtcdServer) ResumeSending() { s.r.resumeSending() }

func (s *EtcdServer) ClusterVersion() *semver.Version {
	if s.cluster == nil {
		return nil
	}
	return s.cluster.Version()
}

// monitorVersions checks the member's version every monitorVersionInterval.
// It updates the cluster version if all members agree on a higher one.
// It logs a message if there is a member with a higher version than the
// local version.
func (s *EtcdServer) monitorVersions() {
	for {
		select {
		case <-s.forceVersionC:
		case <-time.After(monitorVersionInterval):
		case <-s.stopping:
			return
		}

		if s.Leader() != s.ID() {
			continue
		}

		v := decideClusterVersion(s.getLogger(), getVersions(s.getLogger(), s.cluster, s.id, s.peerRt))
		if v != nil {
			// only keep major.minor version for comparison
			v = &semver.Version{
				Major: v.Major,
				Minor: v.Minor,
			}
		}

		// if the current version is nil:
		// 1. use the decided version if possible
		// 2. or use the min cluster version
		if s.cluster.Version() == nil {
			verStr := version.MinClusterVersion
			if v != nil {
				verStr = v.String()
			}
			s.goAttach(func() { s.updateClusterVersion(verStr) })
			continue
		}

		// update cluster version only if the decided version is greater than
		// the current cluster version
		if v != nil && s.cluster.Version().LessThan(*v) {
			s.goAttach(func() { s.updateClusterVersion(v.String()) })
		}
	}
}
func (s *EtcdServer) updateClusterVersion(ver string) {
	lg := s.getLogger()

	if s.cluster.Version() == nil {
		if lg != nil {
			lg.Info(
				"setting up initial cluster version",
				zap.String("cluster-version", version.Cluster(ver)),
			)
		} else {
			plog.Infof("setting up the initial cluster version to %s", version.Cluster(ver))
		}
	} else {
		if lg != nil {
			lg.Info(
				"updating cluster version",
				zap.String("from", version.Cluster(s.cluster.Version().String())),
				zap.String("to", version.Cluster(ver)),
			)
		} else {
			plog.Infof("updating the cluster version from %s to %s", version.Cluster(s.cluster.Version().String()), version.Cluster(ver))
		}
	}

	req := pb.Request{
		Method: "PUT",
		Path:   membership.StoreClusterVersionKey(),
		Val:    ver,
	}

	ctx, cancel := context.WithTimeout(s.ctx, s.Cfg.ReqTimeout())
	_, err := s.Do(ctx, req)
	cancel()

	switch err {
	case nil:
		if lg != nil {
			lg.Info("cluster version is updated", zap.String("cluster-version", version.Cluster(ver)))
		}
		return

	case ErrStopped:
		if lg != nil {
			lg.Warn("aborting cluster version update; server is stopped", zap.Error(err))
		} else {
			plog.Infof("aborting update cluster version because server is stopped")
		}
		return

	default:
		if lg != nil {
			lg.Warn("failed to update cluster version", zap.Error(err))
		} else {
			plog.Errorf("error updating cluster version (%v)", err)
		}
	}
}

func (s *EtcdServer) parseProposeCtxErr(err error, start time.Time) error {
	switch err {
	case context.Canceled:
		return ErrCanceled

	case context.DeadlineExceeded:
		s.leadTimeMu.RLock()
		curLeadElected := s.leadElectedTime
		s.leadTimeMu.RUnlock()
		prevLeadLost := curLeadElected.Add(-2 * time.Duration(s.Cfg.ElectionTicks) * time.Duration(s.Cfg.TickMs) * time.Millisecond)
		if start.After(prevLeadLost) && start.Before(curLeadElected) {
			return ErrTimeoutDueToLeaderFail
		}
		lead := types.ID(s.getLead())
		switch lead {
		case types.ID(raft.None):
			// TODO: return error to specify it happens because the cluster does not have leader now
		case s.ID():
			if !isConnectedToQuorumSince(s.r.transport, start, s.ID(), s.cluster.Members()) {
				return ErrTimeoutDueToConnectionLost
			}
		default:
			if !isConnectedSince(s.r.transport, start, lead) {
				return ErrTimeoutDueToConnectionLost
			}
		}
		return ErrTimeout

	default:
		return err
	}
}
func (s *EtcdServer) KV() mvcc.ConsistentWatchableKV { return s.kv }

func (s *EtcdServer) Backend() backend.Backend {
	s.bemu.Lock()
	defer s.bemu.Unlock()
	return s.be
}

func (s *EtcdServer) AuthStore() auth.AuthStore { return s.authStore }

func (s *EtcdServer) restoreAlarms() error {
	s.applyV3 = s.newApplierV3()
	as, err := v3alarm.NewAlarmStore(s)
	if err != nil {
		return err
	}
	s.alarmStore = as
	if len(as.Get(pb.AlarmType_NOSPACE)) > 0 {
		s.applyV3 = newApplierV3Capped(s.applyV3)
	}
	if len(as.Get(pb.AlarmType_CORRUPT)) > 0 {
		s.applyV3 = newApplierV3Corrupt(s.applyV3)
	}
	return nil
}

// goAttach creates a goroutine on a given function and tracks it using
// the etcdserver waitgroup.
func (s *EtcdServer) goAttach(f func()) {
	s.wgMu.RLock() // this blocks with ongoing close(s.stopping)
	defer s.wgMu.RUnlock()
	select {
	case <-s.stopping:
		if lg := s.getLogger(); lg != nil {
			lg.Warn("server has stopped; skipping goAttach")
		} else {
			plog.Warning("server has stopped (skipping goAttach)")
		}
		return
	default:
	}

	// now safe to add since waitgroup wait has not started yet
	s.wg.Add(1)
	go func() {
		defer s.wg.Done()
		f()
	}()
}
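// A hypothetical use of goAttach for a background task; the helper name and
// the work performed are illustrative. Because the goroutine is registered
// with the server waitgroup, shutdown waits for it instead of leaking it,
// and new goroutines are refused once s.stopping has been closed.
func exampleBackgroundTask(s *EtcdServer) {
	s.goAttach(func() {
		select {
		case <-time.After(time.Minute):
			// periodic work would go here
		case <-s.StopNotify():
			// server is going down; exit promptly
		}
	})
}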
func (s *EtcdServer) Alarms() []*pb.AlarmMember {
	return s.alarmStore.Get(pb.AlarmType_NONE)
}

func (s *EtcdServer) Logger() *zap.Logger {
	return s.lg
}

// IsLearner returns whether the local member is a raft learner.
func (s *EtcdServer) IsLearner() bool {
	return s.cluster.IsLocalMemberLearner()
}

// IsMemberExist returns whether the member with the given id exists in the cluster.
func (s *EtcdServer) IsMemberExist(id types.ID) bool {
	return s.cluster.IsMemberExist(id)
}

// raftStatus returns the raft status of this etcd node.
func (s *EtcdServer) raftStatus() raft.Status {
	return s.r.Node.Status()
}