config.go

// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package etcdserver

import (
	"context"
	"fmt"
	"path/filepath"
	"sort"
	"strings"
	"time"

	"go.etcd.io/etcd/pkg/netutil"
	"go.etcd.io/etcd/pkg/transport"
	"go.etcd.io/etcd/pkg/types"

	bolt "go.etcd.io/bbolt"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)
// ServerConfig holds the configuration of etcd as taken from the command line or discovery.
type ServerConfig struct {
	Name           string
	DiscoveryURL   string
	DiscoveryProxy string
	ClientURLs     types.URLs
	PeerURLs       types.URLs
	DataDir        string
	// DedicatedWALDir makes etcd write the WAL to this directory
	// rather than to dataDir/member/wal.
	DedicatedWALDir string

	SnapshotCount uint64

	// SnapshotCatchUpEntries is the number of entries for a slow follower
	// to catch up after compacting the raft storage entries.
	// We expect the follower to have millisecond-level latency with the leader.
	// The max throughput is around 10K. Keeping 5K entries is enough to help a
	// follower catch up.
	// WARNING: only change this for tests. Always use "DefaultSnapshotCatchUpEntries"
	SnapshotCatchUpEntries uint64

	MaxSnapFiles uint
	MaxWALFiles  uint

	// BackendBatchInterval is the maximum time before committing the backend transaction.
	BackendBatchInterval time.Duration
	// BackendBatchLimit is the maximum number of operations before committing the backend transaction.
	BackendBatchLimit int

	// BackendFreelistType is the type of the backend boltdb freelist.
	BackendFreelistType bolt.FreelistType

	InitialPeerURLsMap  types.URLsMap
	InitialClusterToken string
	NewCluster          bool
	PeerTLSInfo         transport.TLSInfo

	CORS map[string]struct{}

	// HostWhitelist lists acceptable hostnames from client requests.
	// If the server is insecure (no TLS), it only accepts requests whose
	// Host header value exists in this whitelist.
	HostWhitelist map[string]struct{}

	TickMs        uint
	ElectionTicks int

	// If InitialElectionTickAdvance is true, the local member fast-forwards
	// election ticks to speed up the "initial" leader election trigger. This
	// benefits the case of larger election ticks. For instance, a cross-
	// datacenter deployment may require a longer election timeout of 10 seconds.
	// If true, the local node does not need to wait up to 10 seconds; instead,
	// it forwards its election ticks to 8 seconds, leaving only 2 seconds
	// before leader election.
	//
	// Major assumptions are that:
	//  - the cluster has no active leader, thus advancing ticks enables faster
	//    leader election, or
	//  - the cluster already has an established leader, and the rejoining
	//    follower is likely to receive heartbeats from the leader after tick
	//    advance and before election timeout.
	//
	// However, when the network from leader to rejoining follower is congested
	// and the follower does not receive a leader heartbeat within the remaining
	// election ticks, a disruptive election has to happen, affecting cluster
	// availability.
	//
	// Disabling this would slow down the initial bootstrap process for
	// cross-datacenter deployments. Make your own tradeoff by configuring
	// --initial-election-tick-advance at the cost of slow initial bootstrap.
	//
	// If single-node, it advances ticks regardless.
	//
	// See https://github.com/etcd-io/etcd/issues/9333 for more detail.
	InitialElectionTickAdvance bool

	BootstrapTimeout time.Duration

	AutoCompactionRetention time.Duration
	AutoCompactionMode      string
	QuotaBackendBytes       int64
	MaxTxnOps               uint

	// MaxRequestBytes is the maximum request size to send over raft.
	MaxRequestBytes uint

	StrictReconfigCheck bool

	// ClientCertAuthEnabled is true when cert has been signed by the client CA.
	ClientCertAuthEnabled bool

	AuthToken  string
	BcryptCost uint

	// InitialCorruptCheck is true to check data corruption on boot
	// before serving any peer/client traffic.
	InitialCorruptCheck bool
	CorruptCheckTime    time.Duration

	// PreVote is true to enable Raft Pre-Vote.
	PreVote bool

	// Logger logs server-side operations.
	// If not nil, it disables "capnslog" and uses the given logger.
	Logger *zap.Logger

	// LoggerConfig is server logger configuration for Raft logger.
	// Must be either: "LoggerConfig != nil" or "LoggerCore != nil && LoggerWriteSyncer != nil".
	LoggerConfig *zap.Config
	// LoggerCore is "zapcore.Core" for raft logger.
	// Must be either: "LoggerConfig != nil" or "LoggerCore != nil && LoggerWriteSyncer != nil".
	LoggerCore        zapcore.Core
	LoggerWriteSyncer zapcore.WriteSyncer

	Debug bool

	ForceNewCluster bool

	// EnableLeaseCheckpoint enables the primary lessor to persist lease remainingTTL
	// to prevent indefinite auto-renewal of long-lived leases.
	EnableLeaseCheckpoint bool
	// LeaseCheckpointInterval is the wait duration between lease checkpoints.
	LeaseCheckpointInterval time.Duration

	EnableGRPCGateway bool
}
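
// exampleServerConfig is an illustrative sketch, not part of the upstream file:
// it builds a minimal single-member ServerConfig for a new cluster. The member
// name, URLs, paths, and tick settings below are hypothetical placeholders.
func exampleServerConfig() (ServerConfig, error) {
	purls, err := types.NewURLs([]string{"http://10.0.0.1:2380"})
	if err != nil {
		return ServerConfig{}, err
	}
	curls, err := types.NewURLs([]string{"http://10.0.0.1:2379"})
	if err != nil {
		return ServerConfig{}, err
	}
	pmap, err := types.NewURLsMap("infra0=http://10.0.0.1:2380")
	if err != nil {
		return ServerConfig{}, err
	}
	return ServerConfig{
		Name:                "infra0",
		DataDir:             "/var/lib/etcd",
		PeerURLs:            purls,
		ClientURLs:          curls,
		InitialPeerURLsMap:  pmap,
		InitialClusterToken: "etcd-cluster-1",
		NewCluster:          true,
		TickMs:              100, // heartbeat interval in milliseconds
		ElectionTicks:       10,  // election timeout = ElectionTicks * TickMs = 1s
		SnapshotCount:       100000,
		StrictReconfigCheck: true,
	}, nil
}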
// VerifyBootstrap sanity-checks the initial config for the bootstrap case
// and returns an error for things that should never happen.
func (c *ServerConfig) VerifyBootstrap() error {
	if err := c.hasLocalMember(); err != nil {
		return err
	}
	if err := c.advertiseMatchesCluster(); err != nil {
		return err
	}
	if checkDuplicateURL(c.InitialPeerURLsMap) {
		return fmt.Errorf("initial cluster %s has duplicate url", c.InitialPeerURLsMap)
	}
	if c.InitialPeerURLsMap.String() == "" && c.DiscoveryURL == "" {
		return fmt.Errorf("initial cluster unset and no discovery URL found")
	}
	return nil
}
// VerifyJoinExisting sanity-checks the initial config for the join-existing-cluster
// case and returns an error for things that should never happen.
func (c *ServerConfig) VerifyJoinExisting() error {
	// The member has announced its peer urls to the cluster before starting; no need to
	// set the configuration again.
	if err := c.hasLocalMember(); err != nil {
		return err
	}
	if checkDuplicateURL(c.InitialPeerURLsMap) {
		return fmt.Errorf("initial cluster %s has duplicate url", c.InitialPeerURLsMap)
	}
	if c.DiscoveryURL != "" {
		return fmt.Errorf("discovery URL should not be set when joining existing initial cluster")
	}
	return nil
}
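
// exampleVerify is an illustrative sketch, not part of the upstream file: it
// shows roughly how the two checks above are meant to be used. The dispatch on
// NewCluster is an assumption for illustration, not the server's exact logic.
func exampleVerify(cfg *ServerConfig) error {
	if cfg.NewCluster {
		// Fresh cluster: the local member must appear in the initial cluster map,
		// advertised peer URLs must match it, and all URLs must be unique.
		return cfg.VerifyBootstrap()
	}
	// Joining an existing cluster: the cluster already knows this member's peer
	// URLs, so only local consistency (and the absence of a discovery URL) is checked.
	return cfg.VerifyJoinExisting()
}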
// hasLocalMember checks that the cluster at least contains the local server.
func (c *ServerConfig) hasLocalMember() error {
	if urls := c.InitialPeerURLsMap[c.Name]; urls == nil {
		return fmt.Errorf("couldn't find local name %q in the initial cluster configuration", c.Name)
	}
	return nil
}
// advertiseMatchesCluster confirms peer URLs match those in the cluster peer list.
func (c *ServerConfig) advertiseMatchesCluster() error {
	urls, apurls := c.InitialPeerURLsMap[c.Name], c.PeerURLs.StringSlice()
	urls.Sort()
	sort.Strings(apurls)
	ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second)
	defer cancel()
	ok, err := netutil.URLStringsEqual(ctx, c.Logger, apurls, urls.StringSlice())
	if ok {
		return nil
	}

	initMap, apMap := make(map[string]struct{}), make(map[string]struct{})
	for _, url := range c.PeerURLs {
		apMap[url.String()] = struct{}{}
	}
	for _, url := range c.InitialPeerURLsMap[c.Name] {
		initMap[url.String()] = struct{}{}
	}

	missing := []string{}
	for url := range initMap {
		if _, ok := apMap[url]; !ok {
			missing = append(missing, url)
		}
	}
	if len(missing) > 0 {
		for i := range missing {
			missing[i] = c.Name + "=" + missing[i]
		}
		mstr := strings.Join(missing, ",")
		apStr := strings.Join(apurls, ",")
		return fmt.Errorf("--initial-cluster has %s but missing from --initial-advertise-peer-urls=%s (%v)", mstr, apStr, err)
	}

	for url := range apMap {
		if _, ok := initMap[url]; !ok {
			missing = append(missing, url)
		}
	}
	if len(missing) > 0 {
		mstr := strings.Join(missing, ",")
		umap := types.URLsMap(map[string]types.URLs{c.Name: c.PeerURLs})
		return fmt.Errorf("--initial-advertise-peer-urls has %s but missing from --initial-cluster=%s", mstr, umap.String())
	}

	// resolved URLs from "--initial-advertise-peer-urls" and "--initial-cluster" did not match or failed
	apStr := strings.Join(apurls, ",")
	umap := types.URLsMap(map[string]types.URLs{c.Name: c.PeerURLs})
	return fmt.Errorf("failed to resolve %s to match --initial-cluster=%s (%v)", apStr, umap.String(), err)
}
func (c *ServerConfig) MemberDir() string { return filepath.Join(c.DataDir, "member") }

func (c *ServerConfig) WALDir() string {
	if c.DedicatedWALDir != "" {
		return c.DedicatedWALDir
	}
	return filepath.Join(c.MemberDir(), "wal")
}

func (c *ServerConfig) SnapDir() string { return filepath.Join(c.MemberDir(), "snap") }

func (c *ServerConfig) ShouldDiscover() bool { return c.DiscoveryURL != "" }
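
// exampleLayout is an illustrative sketch, not part of the upstream file: it
// shows the on-disk layout derived from DataDir by the helpers above, using a
// hypothetical data directory:
//
//	DataDir:     /var/lib/etcd
//	MemberDir(): /var/lib/etcd/member
//	WALDir():    /var/lib/etcd/member/wal   (unless DedicatedWALDir is set)
//	SnapDir():   /var/lib/etcd/member/snap
func exampleLayout() []string {
	c := ServerConfig{DataDir: "/var/lib/etcd"}
	return []string{c.MemberDir(), c.WALDir(), c.SnapDir()}
}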
// ReqTimeout returns the timeout for a request to finish.
func (c *ServerConfig) ReqTimeout() time.Duration {
	// 5s for queue waiting, computation and disk IO delay
	// + 2 * election timeout for possible leader election
	return 5*time.Second + 2*time.Duration(c.ElectionTicks*int(c.TickMs))*time.Millisecond
}

func (c *ServerConfig) electionTimeout() time.Duration {
	return time.Duration(c.ElectionTicks*int(c.TickMs)) * time.Millisecond
}

func (c *ServerConfig) peerDialTimeout() time.Duration {
	// 1s for queue wait and election timeout
	return time.Second + time.Duration(c.ElectionTicks*int(c.TickMs))*time.Millisecond
}
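
// exampleTimeouts is an illustrative sketch, not part of the upstream file: it
// works through the timeout arithmetic above with a hypothetical 100ms tick and
// a 10-tick election timeout (electionTimeout = 10 * 100ms = 1s), giving
// ReqTimeout = 5s + 2*1s = 7s and peerDialTimeout = 1s + 1s = 2s.
func exampleTimeouts() (req, dial time.Duration) {
	c := ServerConfig{TickMs: 100, ElectionTicks: 10}
	return c.ReqTimeout(), c.peerDialTimeout()
}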
func checkDuplicateURL(urlsmap types.URLsMap) bool {
	um := make(map[string]bool)
	for _, urls := range urlsmap {
		for _, url := range urls {
			u := url.String()
			if um[u] {
				return true
			}
			um[u] = true
		}
	}
	return false
}
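
// exampleDuplicateURL is an illustrative sketch, not part of the upstream file:
// checkDuplicateURL reports true when the same URL appears under any member,
// e.g. two hypothetical members advertising the same peer URL.
func exampleDuplicateURL() (bool, error) {
	umap, err := types.NewURLsMap("infra0=http://10.0.0.1:2380,infra1=http://10.0.0.1:2380")
	if err != nil {
		return false, err
	}
	// true: both members claim http://10.0.0.1:2380
	return checkDuplicateURL(umap), nil
}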
func (c *ServerConfig) bootstrapTimeout() time.Duration {
	if c.BootstrapTimeout != 0 {
		return c.BootstrapTimeout
	}
	return time.Second
}

func (c *ServerConfig) backendPath() string { return filepath.Join(c.SnapDir(), "db") }