123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497 |
- // Copyright 2016 The etcd Authors
- //
- // Licensed under the Apache License, Version 2.0 (the "License");
- // you may not use this file except in compliance with the License.
- // You may obtain a copy of the License at
- //
- // http://www.apache.org/licenses/LICENSE-2.0
- //
- // Unless required by applicable law or agreed to in writing, software
- // distributed under the License is distributed on an "AS IS" BASIS,
- // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- // See the License for the specific language governing permissions and
- // limitations under the License.
- package embed
- import (
- "context"
- "crypto/tls"
- "fmt"
- "io/ioutil"
- defaultLog "log"
- "net"
- "net/http"
- "sync"
- "time"
- "github.com/coreos/etcd/etcdserver"
- "github.com/coreos/etcd/etcdserver/api/etcdhttp"
- "github.com/coreos/etcd/etcdserver/api/v2http"
- "github.com/coreos/etcd/etcdserver/api/v3rpc"
- "github.com/coreos/etcd/pkg/cors"
- "github.com/coreos/etcd/pkg/debugutil"
- runtimeutil "github.com/coreos/etcd/pkg/runtime"
- "github.com/coreos/etcd/pkg/transport"
- "github.com/coreos/etcd/pkg/types"
- "github.com/coreos/etcd/rafthttp"
- "github.com/cockroachdb/cmux"
- "github.com/coreos/pkg/capnslog"
- "google.golang.org/grpc"
- "google.golang.org/grpc/keepalive"
- )
// plog is the package-level logger for the embed package.
var plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "embed")

const (
	// internal fd usage includes disk usage and transport usage.
	// To read/write snapshot, snap pkg needs 1. In normal case, wal pkg needs
	// at most 2 to read/lock/write WALs. One case that it needs to 2 is to
	// read all logs after some snapshot index, which locates at the end of
	// the second last and the head of the last. For purging, it needs to read
	// directory, so it needs 1. For fd monitor, it needs 1.
	// For transport, rafthttp builds two long-polling connections and at most
	// four temporary connections with each member. There are at most 9 members
	// in a cluster, so it should reserve 96.
	// For the safety, we set the total reserved number to 150.
	//
	// startClientListeners subtracts this from the process fd limit when
	// capping the client connection count.
	reservedInternalFDNum = 150
)
// Etcd contains a running etcd server and its listeners.
type Etcd struct {
	// Peers are the listeners serving raft/peer traffic (see servePeers).
	Peers []*peerListener
	// Clients are the listeners serving client traffic, one per serveCtx.
	Clients []net.Listener
	// a map of contexts for the servers that serves client requests.
	sctxs map[string]*serveCtx

	Server *etcdserver.EtcdServer

	// cfg is a private copy of the config passed to StartEtcd.
	cfg Config
	// stopc is closed exactly once (via closeOnce) to signal shutdown;
	// errHandler watches it to stop forwarding serve errors.
	stopc chan struct{}
	// errc carries fatal serve errors; buffered so goroutines on closed
	// connections won't block forever (see StartEtcd). Exposed via Err().
	errc chan error

	// closeOnce guards closing stopc in Close.
	closeOnce sync.Once
}
// peerListener wraps a peer net.Listener with a hook to start serving on it
// and a hook to shut it down within a context deadline.
type peerListener struct {
	net.Listener
	// serve blocks serving peer traffic; run in its own goroutine (servePeers).
	serve func() error
	// close shuts down the listener; startPeerListeners installs a plain
	// Listener.Close, and servePeers later overwrites it with a graceful
	// server shutdown.
	close func(context.Context) error
}
// StartEtcd launches the etcd server and HTTP handlers for client/server communication.
// The returned Etcd.Server is not guaranteed to have joined the cluster. Wait
// on the Etcd.Server.ReadyNotify() channel to know when it completes and is ready for use.
func StartEtcd(inCfg *Config) (e *Etcd, err error) {
	if err = inCfg.Validate(); err != nil {
		return nil, err
	}
	serving := false
	e = &Etcd{cfg: *inCfg, stopc: make(chan struct{})}
	cfg := &e.cfg
	// On any error below, tear down whatever was started so far and return nil.
	defer func() {
		if e == nil || err == nil {
			return
		}
		if !serving {
			// errored before starting gRPC server for serveCtx.serversC
			for _, sctx := range e.sctxs {
				close(sctx.serversC)
			}
		}
		e.Close()
		e = nil
	}()

	// Open peer listeners first, then client listeners.
	if e.Peers, err = startPeerListeners(cfg); err != nil {
		return e, err
	}
	if e.sctxs, err = startClientListeners(cfg); err != nil {
		return e, err
	}
	for _, sctx := range e.sctxs {
		e.Clients = append(e.Clients, sctx.l)
	}

	var (
		urlsmap types.URLsMap
		token   string
	)
	// Only resolve an initial cluster map for an uninitialized member; an
	// initialized member recovers its membership from the data directory.
	if !isMemberInitialized(cfg) {
		urlsmap, token, err = cfg.PeerURLsMapAndToken("etcd")
		if err != nil {
			return e, fmt.Errorf("error setting up initial cluster: %v", err)
		}
	}

	// Translate the embed config into the etcdserver config.
	srvcfg := &etcdserver.ServerConfig{
		Name:                       cfg.Name,
		ClientURLs:                 cfg.ACUrls,
		PeerURLs:                   cfg.APUrls,
		DataDir:                    cfg.Dir,
		DedicatedWALDir:            cfg.WalDir,
		SnapCount:                  cfg.SnapCount,
		MaxSnapFiles:               cfg.MaxSnapFiles,
		MaxWALFiles:                cfg.MaxWalFiles,
		InitialPeerURLsMap:         urlsmap,
		InitialClusterToken:        token,
		DiscoveryURL:               cfg.Durl,
		DiscoveryProxy:             cfg.Dproxy,
		NewCluster:                 cfg.IsNewCluster(),
		ForceNewCluster:            cfg.ForceNewCluster,
		PeerTLSInfo:                cfg.PeerTLSInfo,
		TickMs:                     cfg.TickMs,
		ElectionTicks:              cfg.ElectionTicks(),
		InitialElectionTickAdvance: cfg.InitialElectionTickAdvance,
		AutoCompactionRetention:    cfg.AutoCompactionRetention,
		QuotaBackendBytes:          cfg.QuotaBackendBytes,
		MaxRequestBytes:            cfg.MaxRequestBytes,
		StrictReconfigCheck:        cfg.StrictReconfigCheck,
		ClientCertAuthEnabled:      cfg.ClientTLSInfo.ClientCertAuth,
		AuthToken:                  cfg.AuthToken,
		Debug:                      cfg.Debug,
	}

	if e.Server, err = etcdserver.NewServer(srvcfg); err != nil {
		return e, err
	}

	// buffer channel so goroutines on closed connections won't wait forever
	e.errc = make(chan error, len(e.Peers)+len(e.Clients)+2*len(e.sctxs))

	e.Server.Start()
	if err = e.servePeers(); err != nil {
		return e, err
	}
	if err = e.serveClients(); err != nil {
		return e, err
	}
	// Past this point Close drains serversC itself; the deferred cleanup no
	// longer needs to close the channels.
	serving = true
	return e, nil
}
- // Config returns the current configuration.
- func (e *Etcd) Config() Config {
- return e.cfg
- }
// Close gracefully shuts down all servers/listeners.
// Client requests will be terminated with request timeout.
// After timeout, enforce remaining requests be closed immediately.
func (e *Etcd) Close() {
	// Signal shutdown exactly once; errHandler watches stopc.
	e.closeOnce.Do(func() { close(e.stopc) })

	// close client requests with request timeout
	timeout := 2 * time.Second
	if e.Server != nil {
		timeout = e.Server.Cfg.ReqTimeout()
	}
	for _, sctx := range e.sctxs {
		// Drain every gRPC/HTTP server pair registered on this serve context
		// and stop each within the request timeout.
		for ss := range sctx.serversC {
			ctx, cancel := context.WithTimeout(context.Background(), timeout)
			stopServers(ctx, ss)
			cancel()
		}
	}

	for _, sctx := range e.sctxs {
		sctx.cancel()
	}

	for i := range e.Clients {
		if e.Clients[i] != nil {
			e.Clients[i].Close()
		}
	}

	// close rafthttp transports
	if e.Server != nil {
		e.Server.Stop()
	}

	// close all idle connections in peer handler (wait up to 1-second)
	for i := range e.Peers {
		if e.Peers[i] != nil && e.Peers[i].close != nil {
			ctx, cancel := context.WithTimeout(context.Background(), time.Second)
			e.Peers[i].close(ctx)
			cancel()
		}
	}
}
// stopServers shuts down a gRPC/HTTP server pair. Insecure servers are
// drained gracefully until ctx expires; TLS-enabled servers are stopped
// immediately (see the linked issues on GracefulStop with TLS).
func stopServers(ctx context.Context, ss *servers) {
	shutdownNow := func() {
		// first, close the http.Server
		ss.http.Shutdown(ctx)
		// then close grpc.Server; cancels all active RPCs
		ss.grpc.Stop()
	}

	// do not grpc.Server.GracefulStop with TLS enabled etcd server
	// See https://github.com/grpc/grpc-go/issues/1384#issuecomment-317124531
	// and https://github.com/coreos/etcd/issues/8916
	if ss.secure {
		shutdownNow()
		return
	}

	ch := make(chan struct{})
	go func() {
		defer close(ch)
		// close listeners to stop accepting new connections,
		// will block on any existing transports
		ss.grpc.GracefulStop()
	}()

	// wait until all pending RPCs are finished
	select {
	case <-ch:
	case <-ctx.Done():
		// took too long, manually close open transports
		// e.g. watch streams
		shutdownNow()
		// concurrent GracefulStop should be interrupted
		<-ch
	}
}
- func (e *Etcd) Err() <-chan error { return e.errc }
- func startPeerListeners(cfg *Config) (peers []*peerListener, err error) {
- if err = updateCipherSuites(&cfg.PeerTLSInfo, cfg.CipherSuites); err != nil {
- return nil, err
- }
- if err = cfg.PeerSelfCert(); err != nil {
- plog.Fatalf("could not get certs (%v)", err)
- }
- if !cfg.PeerTLSInfo.Empty() {
- plog.Infof("peerTLS: %s", cfg.PeerTLSInfo)
- }
- peers = make([]*peerListener, len(cfg.LPUrls))
- defer func() {
- if err == nil {
- return
- }
- for i := range peers {
- if peers[i] != nil && peers[i].close != nil {
- plog.Info("stopping listening for peers on ", cfg.LPUrls[i].String())
- ctx, cancel := context.WithTimeout(context.Background(), time.Second)
- peers[i].close(ctx)
- cancel()
- }
- }
- }()
- for i, u := range cfg.LPUrls {
- if u.Scheme == "http" {
- if !cfg.PeerTLSInfo.Empty() {
- plog.Warningf("The scheme of peer url %s is HTTP while peer key/cert files are presented. Ignored peer key/cert files.", u.String())
- }
- if cfg.PeerTLSInfo.ClientCertAuth {
- plog.Warningf("The scheme of peer url %s is HTTP while client cert auth (--peer-client-cert-auth) is enabled. Ignored client cert auth for this url.", u.String())
- }
- }
- peers[i] = &peerListener{close: func(context.Context) error { return nil }}
- peers[i].Listener, err = rafthttp.NewListener(u, &cfg.PeerTLSInfo)
- if err != nil {
- return nil, err
- }
- // once serve, overwrite with 'http.Server.Shutdown'
- peers[i].close = func(context.Context) error {
- return peers[i].Listener.Close()
- }
- plog.Info("listening for peers on ", u.String())
- }
- return peers, nil
- }
// configure peer handlers after rafthttp.Transport started
func (e *Etcd) servePeers() (err error) {
	ph := etcdhttp.NewPeerHandler(e.Server)
	var peerTLScfg *tls.Config
	if !e.cfg.PeerTLSInfo.Empty() {
		if peerTLScfg, err = e.cfg.PeerTLSInfo.ServerConfig(); err != nil {
			return err
		}
	}

	for _, p := range e.Peers {
		gs := v3rpc.Server(e.Server, peerTLScfg)
		// Multiplex gRPC (HTTP/2) and plain HTTP peer traffic on one listener.
		m := cmux.New(p.Listener)
		go gs.Serve(m.Match(cmux.HTTP2()))
		srv := &http.Server{
			Handler:     grpcHandlerFunc(gs, ph),
			ReadTimeout: 5 * time.Minute,
			ErrorLog:    defaultLog.New(ioutil.Discard, "", 0), // do not log user error
		}
		go srv.Serve(m.Match(cmux.Any()))
		// m, gs, srv are declared inside the loop body, so each closure below
		// is bound to this peer's own servers.
		p.serve = func() error { return m.Serve() }
		p.close = func(ctx context.Context) error {
			// gracefully shutdown http.Server
			// close open listeners, idle connections
			// until context cancel or time-out
			stopServers(ctx, &servers{secure: peerTLScfg != nil, grpc: gs, http: srv})
			return nil
		}
	}

	// start peer servers in a goroutine
	for _, pl := range e.Peers {
		go func(l *peerListener) {
			e.errHandler(l.serve())
		}(pl)
	}
	return nil
}
- func startClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err error) {
- if err = updateCipherSuites(&cfg.ClientTLSInfo, cfg.CipherSuites); err != nil {
- return nil, err
- }
- if err = cfg.ClientSelfCert(); err != nil {
- plog.Fatalf("could not get certs (%v)", err)
- }
- if cfg.EnablePprof {
- plog.Infof("pprof is enabled under %s", debugutil.HTTPPrefixPProf)
- }
- sctxs = make(map[string]*serveCtx)
- for _, u := range cfg.LCUrls {
- sctx := newServeCtx()
- if u.Scheme == "http" || u.Scheme == "unix" {
- if !cfg.ClientTLSInfo.Empty() {
- plog.Warningf("The scheme of client url %s is HTTP while peer key/cert files are presented. Ignored key/cert files.", u.String())
- }
- if cfg.ClientTLSInfo.ClientCertAuth {
- plog.Warningf("The scheme of client url %s is HTTP while client cert auth (--client-cert-auth) is enabled. Ignored client cert auth for this url.", u.String())
- }
- }
- if (u.Scheme == "https" || u.Scheme == "unixs") && cfg.ClientTLSInfo.Empty() {
- return nil, fmt.Errorf("TLS key/cert (--cert-file, --key-file) must be provided for client url %s with HTTPs scheme", u.String())
- }
- proto := "tcp"
- addr := u.Host
- if u.Scheme == "unix" || u.Scheme == "unixs" {
- proto = "unix"
- addr = u.Host + u.Path
- }
- sctx.secure = u.Scheme == "https" || u.Scheme == "unixs"
- sctx.insecure = !sctx.secure
- if oldctx := sctxs[addr]; oldctx != nil {
- oldctx.secure = oldctx.secure || sctx.secure
- oldctx.insecure = oldctx.insecure || sctx.insecure
- continue
- }
- if sctx.l, err = net.Listen(proto, addr); err != nil {
- return nil, err
- }
- // net.Listener will rewrite ipv4 0.0.0.0 to ipv6 [::], breaking
- // hosts that disable ipv6. So, use the address given by the user.
- sctx.addr = addr
- if fdLimit, fderr := runtimeutil.FDLimit(); fderr == nil {
- if fdLimit <= reservedInternalFDNum {
- plog.Fatalf("file descriptor limit[%d] of etcd process is too low, and should be set higher than %d to ensure internal usage", fdLimit, reservedInternalFDNum)
- }
- sctx.l = transport.LimitListener(sctx.l, int(fdLimit-reservedInternalFDNum))
- }
- if proto == "tcp" {
- if sctx.l, err = transport.NewKeepAliveListener(sctx.l, "tcp", nil); err != nil {
- return nil, err
- }
- }
- plog.Info("listening for client requests on ", u.Host)
- defer func() {
- if err != nil {
- sctx.l.Close()
- plog.Info("stopping listening for client requests on ", u.Host)
- }
- }()
- for k := range cfg.UserHandlers {
- sctx.userHandlers[k] = cfg.UserHandlers[k]
- }
- sctx.serviceRegister = cfg.ServiceRegister
- if cfg.EnablePprof || cfg.Debug {
- sctx.registerPprof()
- }
- if cfg.Debug {
- sctx.registerTrace()
- }
- sctxs[addr] = sctx
- }
- return sctxs, nil
- }
// serveClients builds the shared client-facing HTTP handler and gRPC options,
// then starts one serving goroutine per client serve context.
func (e *Etcd) serveClients() (err error) {
	var ctlscfg *tls.Config
	if !e.cfg.ClientTLSInfo.Empty() {
		plog.Infof("ClientTLS: %s", e.cfg.ClientTLSInfo)
		if ctlscfg, err = e.cfg.ClientTLSInfo.ServerConfig(); err != nil {
			return err
		}
	}

	if e.cfg.CorsInfo.String() != "" {
		plog.Infof("cors = %s", e.cfg.CorsInfo)
	}

	// Start a client server goroutine for each listen address
	var h http.Handler
	if e.Config().EnableV2 {
		// Full v2 API handler.
		h = v2http.NewClientHandler(e.Server, e.Server.Cfg.ReqTimeout())
	} else {
		// v2 disabled: only the basic endpoints (version, health, ...).
		mux := http.NewServeMux()
		etcdhttp.HandleBasic(mux, e.Server)
		h = mux
	}
	h = http.Handler(&cors.CORSHandler{Handler: h, Info: e.cfg.CorsInfo})

	// Translate configured keepalive settings into gRPC server options.
	gopts := []grpc.ServerOption{}
	if e.cfg.GRPCKeepAliveMinTime > time.Duration(0) {
		gopts = append(gopts, grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
			MinTime:             e.cfg.GRPCKeepAliveMinTime,
			PermitWithoutStream: false,
		}))
	}
	// Server-initiated keepalive requires both interval and timeout.
	if e.cfg.GRPCKeepAliveInterval > time.Duration(0) &&
		e.cfg.GRPCKeepAliveTimeout > time.Duration(0) {
		gopts = append(gopts, grpc.KeepaliveParams(keepalive.ServerParameters{
			Time:    e.cfg.GRPCKeepAliveInterval,
			Timeout: e.cfg.GRPCKeepAliveTimeout,
		}))
	}

	// start client servers in a goroutine
	for _, sctx := range e.sctxs {
		go func(s *serveCtx) {
			e.errHandler(s.serve(e.Server, ctlscfg, h, e.errHandler, gopts...))
		}(sctx)
	}
	return nil
}
- func (e *Etcd) errHandler(err error) {
- select {
- case <-e.stopc:
- return
- default:
- }
- select {
- case <-e.stopc:
- case e.errc <- err:
- }
- }
|