etcd.go
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package etcdmain

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net"
	"net/http"
	"os"
	"path"
	"reflect"
	"runtime"
	"strconv"
	"time"

	"github.com/coreos/etcd/Godeps/_workspace/src/github.com/coreos/go-systemd/daemon"
	systemdutil "github.com/coreos/etcd/Godeps/_workspace/src/github.com/coreos/go-systemd/util"
	"github.com/coreos/etcd/Godeps/_workspace/src/github.com/coreos/pkg/capnslog"
	"github.com/coreos/etcd/Godeps/_workspace/src/github.com/prometheus/client_golang/prometheus"
	"github.com/coreos/etcd/discovery"
	"github.com/coreos/etcd/etcdserver"
	"github.com/coreos/etcd/etcdserver/etcdhttp"
	"github.com/coreos/etcd/pkg/cors"
	"github.com/coreos/etcd/pkg/fileutil"
	"github.com/coreos/etcd/pkg/osutil"
	runtimeutil "github.com/coreos/etcd/pkg/runtime"
	"github.com/coreos/etcd/pkg/transport"
	"github.com/coreos/etcd/pkg/types"
	"github.com/coreos/etcd/proxy"
	"github.com/coreos/etcd/rafthttp"
)
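
// dirType describes what an existing data directory was last used for:
// an etcd member, a proxy, or nothing at all.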
type dirType string

var plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdmain")

const (
	// the owner can make/remove files inside the directory
	privateDirMode = 0700

	// internal fd usage includes disk usage and transport usage.
	// To read/write a snapshot, the snap pkg needs 1. In the normal case, the
	// wal pkg needs at most 2 to read/lock/write WALs; one case in which it
	// needs 2 is reading all logs after some snapshot index, which sit at the
	// end of the second-to-last WAL and the head of the last one. For purging,
	// it needs 1 to read the directory. For the fd monitor, it needs 1.
	// For transport, rafthttp builds two long-polling connections and at most
	// four temporary connections with each member. There are at most 9 members
	// in a cluster, so it should reserve 96.
	// For safety, we set the total reserved number to 150.
	reservedInternalFDNum = 150
)

var (
	dirMember = dirType("member")
	dirProxy  = dirType("proxy")
	dirEmpty  = dirType("empty")
)
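
// Main is the entry point for the etcd binary. It parses the configuration,
// starts this process either as an etcd member or as a proxy depending on the
// flags and on what kind of data directory already exists, and blocks until
// the server is stopped.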
func Main() {
	cfg := NewConfig()
	err := cfg.Parse(os.Args[1:])
	if err != nil {
		plog.Errorf("error verifying flags, %v. See 'etcd --help'.", err)
		switch err {
		case errUnsetAdvertiseClientURLsFlag:
			plog.Errorf("When listening on specific address(es), this etcd process must advertise accessible url(s) to each connected client.")
		}
		os.Exit(1)
	}
	setupLogging(cfg)

	var stopped <-chan struct{}

	GoMaxProcs := 1
	if envMaxProcs, err := strconv.Atoi(os.Getenv("GOMAXPROCS")); err == nil {
		GoMaxProcs = envMaxProcs
	}
	plog.Infof("setting maximum number of CPUs to %d, total number of available CPUs is %d", GoMaxProcs, runtime.NumCPU())
	runtime.GOMAXPROCS(GoMaxProcs)

	// TODO: check whether fields are set instead of whether fields have default value
	if cfg.name != defaultName && cfg.initialCluster == initialClusterFromName(defaultName) {
		cfg.initialCluster = initialClusterFromName(cfg.name)
	}

	if cfg.dir == "" {
		cfg.dir = fmt.Sprintf("%v.etcd", cfg.name)
		plog.Warningf("no data-dir provided, using default data-dir ./%s", cfg.dir)
	}

	which := identifyDataDirOrDie(cfg.dir)
	if which != dirEmpty {
		plog.Noticef("the server is already initialized as %v before, starting as etcd %v...", which, which)
		switch which {
		case dirMember:
			stopped, err = startEtcd(cfg)
		case dirProxy:
			err = startProxy(cfg)
		default:
			plog.Panicf("unhandled dir type %v", which)
		}
	} else {
		shouldProxy := cfg.isProxy()
		if !shouldProxy {
			stopped, err = startEtcd(cfg)
			if err == discovery.ErrFullCluster && cfg.shouldFallbackToProxy() {
				plog.Noticef("discovery cluster full, falling back to %s", fallbackFlagProxy)
				shouldProxy = true
			}
		}
		if shouldProxy {
			err = startProxy(cfg)
		}
	}

	if err != nil {
		switch err {
		case discovery.ErrDuplicateID:
			plog.Errorf("member %q has previously registered with discovery service token (%s).", cfg.name, cfg.durl)
			plog.Errorf("But etcd could not find valid cluster configuration in the given data dir (%s).", cfg.dir)
			plog.Infof("Please check the given data dir path if the previous bootstrap succeeded")
			plog.Infof("or use a new discovery token if the previous bootstrap failed.")
			os.Exit(1)
		case discovery.ErrDuplicateName:
			plog.Errorf("member with duplicated name has registered with discovery service token(%s).", cfg.durl)
			plog.Errorf("please check (cURL) the discovery token for more information.")
			plog.Errorf("please do not reuse the discovery token and generate a new one to bootstrap the cluster.")
		default:
			plog.Fatalf("%v", err)
		}
	}

	osutil.HandleInterrupts()

	if systemdutil.IsRunningSystemd() {
		// At this point, the initialization of etcd is done.
		// The listeners are listening on the TCP ports and ready
		// for accepting connections. The http server is probably
		// ready for serving incoming connections. If it is not,
		// the connection might be pending for less than one second.
		err := daemon.SdNotify("READY=1")
		if err != nil {
			plog.Errorf("failed to notify systemd for readiness")
		}
	}

	<-stopped
	osutil.Exit(0)
}

// startEtcd launches the etcd server and HTTP handlers for client/server communication.
func startEtcd(cfg *config) (<-chan struct{}, error) {
	urlsmap, token, err := getPeerURLsMapAndToken(cfg)
	if err != nil {
		return nil, fmt.Errorf("error setting up initial cluster: %v", err)
	}

	pt, err := transport.NewTimeoutTransport(cfg.peerTLSInfo, rafthttp.DialTimeout, rafthttp.ConnReadTimeout, rafthttp.ConnWriteTimeout)
	if err != nil {
		return nil, err
	}

	if !cfg.peerTLSInfo.Empty() {
		plog.Infof("peerTLS: %s", cfg.peerTLSInfo)
	}
	plns := make([]net.Listener, 0)
	for _, u := range cfg.lpurls {
		if u.Scheme == "http" && !cfg.peerTLSInfo.Empty() {
			plog.Warningf("The scheme of peer url %s is http while peer key/cert files are presented. Ignored peer key/cert files.", u.String())
		}
		var l net.Listener
		l, err = transport.NewTimeoutListener(u.Host, u.Scheme, cfg.peerTLSInfo, rafthttp.ConnReadTimeout, rafthttp.ConnWriteTimeout)
		if err != nil {
			return nil, err
		}

		urlStr := u.String()
		plog.Info("listening for peers on ", urlStr)
		defer func() {
			if err != nil {
				l.Close()
				plog.Info("stopping listening for peers on ", urlStr)
			}
		}()
		plns = append(plns, l)
	}

	if !cfg.clientTLSInfo.Empty() {
		plog.Infof("clientTLS: %s", cfg.clientTLSInfo)
	}
	clns := make([]net.Listener, 0)
	for _, u := range cfg.lcurls {
		if u.Scheme == "http" && !cfg.clientTLSInfo.Empty() {
			plog.Warningf("The scheme of client url %s is http while client key/cert files are presented. Ignored client key/cert files.", u.String())
		}
		var l net.Listener
		l, err = transport.NewKeepAliveListener(u.Host, u.Scheme, cfg.clientTLSInfo)
		if err != nil {
			return nil, err
		}

		if fdLimit, err := runtimeutil.FDLimit(); err == nil {
			if fdLimit <= reservedInternalFDNum {
				plog.Fatalf("file descriptor limit[%d] of etcd process is too low, and should be set higher than %d to ensure internal usage", fdLimit, reservedInternalFDNum)
			}
			l = &transport.LimitedConnListener{
				Listener:       l,
				RuntimeFDLimit: fdLimit - reservedInternalFDNum,
			}
		}

		urlStr := u.String()
		plog.Info("listening for client requests on ", urlStr)
		defer func() {
			if err != nil {
				l.Close()
				plog.Info("stopping listening for client requests on ", urlStr)
			}
		}()
		clns = append(clns, l)
	}

	srvcfg := &etcdserver.ServerConfig{
		Name:                cfg.name,
		ClientURLs:          cfg.acurls,
		PeerURLs:            cfg.apurls,
		DataDir:             cfg.dir,
		SnapCount:           cfg.snapCount,
		MaxSnapFiles:        cfg.maxSnapFiles,
		MaxWALFiles:         cfg.maxWalFiles,
		InitialPeerURLsMap:  urlsmap,
		InitialClusterToken: token,
		DiscoveryURL:        cfg.durl,
		DiscoveryProxy:      cfg.dproxy,
		NewCluster:          cfg.isNewCluster(),
		ForceNewCluster:     cfg.forceNewCluster,
		Transport:           pt,
		TickMs:              cfg.TickMs,
		ElectionTicks:       cfg.electionTicks(),
	}
	var s *etcdserver.EtcdServer
	s, err = etcdserver.NewServer(srvcfg)
	if err != nil {
		return nil, err
	}
	s.Start()
	osutil.RegisterInterruptHandler(s.Stop)

	if cfg.corsInfo.String() != "" {
		plog.Infof("cors = %s", cfg.corsInfo)
	}
	ch := &cors.CORSHandler{
		Handler: etcdhttp.NewClientHandler(s),
		Info:    cfg.corsInfo,
	}
	ph := etcdhttp.NewPeerHandler(s.Cluster(), s.RaftHandler())
	// Start the peer server in a goroutine
	for _, l := range plns {
		go func(l net.Listener) {
			plog.Fatal(serveHTTP(l, ph, 5*time.Minute))
		}(l)
	}
	// Start a client server goroutine for each listen address
	for _, l := range clns {
		go func(l net.Listener) {
			// read timeout does not work with http close notify
			// TODO: https://github.com/golang/go/issues/9524
			plog.Fatal(serveHTTP(l, ch, 0))
		}(l)
	}
	return s.StopNotify(), nil
}

// startProxy launches an HTTP proxy for client communication which proxies to other etcd nodes.
func startProxy(cfg *config) error {
	urlsmap, _, err := getPeerURLsMapAndToken(cfg)
	if err != nil {
		return fmt.Errorf("error setting up initial cluster: %v", err)
	}

	pt, err := transport.NewTimeoutTransport(cfg.peerTLSInfo, time.Duration(cfg.proxyDialTimeoutMs)*time.Millisecond, time.Duration(cfg.proxyReadTimeoutMs)*time.Millisecond, time.Duration(cfg.proxyWriteTimeoutMs)*time.Millisecond)
	if err != nil {
		return err
	}
	pt.MaxIdleConnsPerHost = proxy.DefaultMaxIdleConnsPerHost

	tr, err := transport.NewTimeoutTransport(cfg.peerTLSInfo, time.Duration(cfg.proxyDialTimeoutMs)*time.Millisecond, time.Duration(cfg.proxyReadTimeoutMs)*time.Millisecond, time.Duration(cfg.proxyWriteTimeoutMs)*time.Millisecond)
	if err != nil {
		return err
	}

	cfg.dir = path.Join(cfg.dir, "proxy")
	err = os.MkdirAll(cfg.dir, privateDirMode)
	if err != nil {
		return err
	}

	var peerURLs []string
	clusterfile := path.Join(cfg.dir, "cluster")

	b, err := ioutil.ReadFile(clusterfile)
	switch {
	case err == nil:
		if cfg.durl != "" {
			plog.Warningf("discovery token ignored since the proxy has already been initialized. Valid cluster file found at %q", clusterfile)
		}
		urls := struct{ PeerURLs []string }{}
		err := json.Unmarshal(b, &urls)
		if err != nil {
			return err
		}
		peerURLs = urls.PeerURLs
		plog.Infof("proxy: using peer urls %v from cluster file %q", peerURLs, clusterfile)
	case os.IsNotExist(err):
		if cfg.durl != "" {
			s, err := discovery.GetCluster(cfg.durl, cfg.dproxy)
			if err != nil {
				return err
			}
			if urlsmap, err = types.NewURLsMap(s); err != nil {
				return err
			}
		}
		peerURLs = urlsmap.URLs()
		plog.Infof("proxy: using peer urls %v ", peerURLs)
	default:
		return err
	}

	clientURLs := []string{}
	// uf refreshes the list of client URLs by asking the current peers for the
	// cluster membership, and persists the discovered peer URLs back to the
	// cluster file.
	uf := func() []string {
		gcls, err := etcdserver.GetClusterFromRemotePeers(peerURLs, tr)
		// TODO: remove the 2nd check when we fix GetClusterFromPeers
		// GetClusterFromPeers should not return nil error with an invalid empty cluster
		if err != nil {
			plog.Warningf("proxy: %v", err)
			return []string{}
		}
		if len(gcls.Members()) == 0 {
			return clientURLs
		}
		clientURLs = gcls.ClientURLs()

		urls := struct{ PeerURLs []string }{gcls.PeerURLs()}
		b, err := json.Marshal(urls)
		if err != nil {
			plog.Warningf("proxy: error on marshal peer urls %s", err)
			return clientURLs
		}

		err = ioutil.WriteFile(clusterfile+".bak", b, 0600)
		if err != nil {
			plog.Warningf("proxy: error on writing urls %s", err)
			return clientURLs
		}
		err = os.Rename(clusterfile+".bak", clusterfile)
		if err != nil {
			plog.Warningf("proxy: error on updating clusterfile %s", err)
			return clientURLs
		}
		if !reflect.DeepEqual(gcls.PeerURLs(), peerURLs) {
			plog.Noticef("proxy: updated peer urls in cluster file from %v to %v", peerURLs, gcls.PeerURLs())
		}
		peerURLs = gcls.PeerURLs()

		return clientURLs
	}

	ph := proxy.NewHandler(pt, uf, time.Duration(cfg.proxyFailureWaitMs)*time.Millisecond, time.Duration(cfg.proxyRefreshIntervalMs)*time.Millisecond)
	ph = &cors.CORSHandler{
		Handler: ph,
		Info:    cfg.corsInfo,
	}

	if cfg.isReadonlyProxy() {
		ph = proxy.NewReadonlyHandler(ph)
	}
	// Start a proxy server goroutine for each listen address
	for _, u := range cfg.lcurls {
		l, err := transport.NewListener(u.Host, u.Scheme, cfg.clientTLSInfo)
		if err != nil {
			return err
		}

		host := u.Host
		go func() {
			plog.Info("proxy: listening for client requests on ", host)
			mux := http.NewServeMux()
			mux.Handle("/metrics", prometheus.Handler())
			mux.Handle("/", ph)
			plog.Fatal(http.Serve(l, mux))
		}()
	}
	return nil
}

// getPeerURLsMapAndToken sets up an initial peer URLsMap and cluster token for bootstrap or discovery.
func getPeerURLsMapAndToken(cfg *config) (urlsmap types.URLsMap, token string, err error) {
	switch {
	case cfg.durl != "":
		urlsmap = types.URLsMap{}
		// If using discovery, generate a temporary cluster based on
		// self's advertised peer URLs
		urlsmap[cfg.name] = cfg.apurls
		token = cfg.durl
	case cfg.dnsCluster != "":
		var clusterStr string
		clusterStr, token, err = discovery.SRVGetCluster(cfg.name, cfg.dnsCluster, cfg.initialClusterToken, cfg.apurls)
		if err != nil {
			return nil, "", err
		}
		urlsmap, err = types.NewURLsMap(clusterStr)
		if _, ok := urlsmap[cfg.name]; !ok {
			return nil, "", fmt.Errorf("cannot find local etcd member %q in SRV records", cfg.name)
		}
	default:
		// We're statically configured, and the cluster has appropriately been set.
		urlsmap, err = types.NewURLsMap(cfg.initialCluster)
		token = cfg.initialClusterToken
	}
	return urlsmap, token, err
}

// identifyDataDirOrDie returns the type of the data dir.
// Dies if the datadir is invalid.
func identifyDataDirOrDie(dir string) dirType {
	names, err := fileutil.ReadDir(dir)
	if err != nil {
		if os.IsNotExist(err) {
			return dirEmpty
		}
		plog.Fatalf("error listing data dir: %s", dir)
	}

	var m, p bool
	for _, name := range names {
		switch dirType(name) {
		case dirMember:
			m = true
		case dirProxy:
			p = true
		default:
			plog.Warningf("found invalid file/dir %s under data dir %s (Ignore this if you are upgrading etcd)", name, dir)
		}
	}

	if m && p {
		plog.Fatal("invalid datadir. Both member and proxy directories exist.")
	}
	if m {
		return dirMember
	}
	if p {
		return dirProxy
	}
	return dirEmpty
}
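
// setupLogging configures the global capnslog log level (and optional
// per-package log levels) from the parsed configuration.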
func setupLogging(cfg *config) {
	capnslog.SetGlobalLogLevel(capnslog.INFO)
	if cfg.debug {
		capnslog.SetGlobalLogLevel(capnslog.DEBUG)
	}
	if cfg.logPkgLevels != "" {
		repoLog := capnslog.MustRepoLogger("github.com/coreos/etcd")
		settings, err := repoLog.ParseLogLevelConfig(cfg.logPkgLevels)
		if err != nil {
			plog.Warningf("couldn't parse log level string: %s, continuing with default levels", err.Error())
			return
		}
		repoLog.SetLogLevel(settings)
	}
}