// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package embed

import (
	"context"
	"crypto/tls"
	"fmt"
	"io/ioutil"
	defaultLog "log"
	"net"
	"net/http"
	"net/url"
	"sort"
	"strconv"
	"sync"
	"time"

	"github.com/coreos/etcd/etcdserver"
	"github.com/coreos/etcd/etcdserver/api/etcdhttp"
	"github.com/coreos/etcd/etcdserver/api/v2http"
	"github.com/coreos/etcd/etcdserver/api/v2v3"
	"github.com/coreos/etcd/etcdserver/api/v3client"
	"github.com/coreos/etcd/etcdserver/api/v3rpc"
	"github.com/coreos/etcd/pkg/debugutil"
	runtimeutil "github.com/coreos/etcd/pkg/runtime"
	"github.com/coreos/etcd/pkg/transport"
	"github.com/coreos/etcd/pkg/types"
	"github.com/coreos/etcd/rafthttp"

	"github.com/coreos/pkg/capnslog"
	"github.com/grpc-ecosystem/go-grpc-prometheus"
	"github.com/soheilhy/cmux"
	"go.uber.org/zap"
	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

var plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "embed")

const (
	// Internal fd usage includes disk usage and transport usage.
	// To read/write a snapshot, the snap pkg needs 1. In the normal case, the
	// wal pkg needs at most 2 to read/lock/write WALs. One case where it
	// needs 2 is to read all logs after some snapshot index, which are
	// located at the end of the second-to-last WAL and the head of the last.
	// For purging, it needs to read the directory, so it needs 1. For the fd
	// monitor, it needs 1.
	// For transport, rafthttp builds two long-polling connections and at most
	// four temporary connections with each member. There are at most 9 members
	// in a cluster, so it should reserve 96.
	// For safety, we set the total reserved number to 150.
	reservedInternalFDNum = 150
)

// Etcd contains a running etcd server and its listeners.
type Etcd struct {
	Peers   []*peerListener
	Clients []net.Listener
	// a map of contexts for the servers that serve client requests.
	sctxs            map[string]*serveCtx
	metricsListeners []net.Listener

	Server *etcdserver.EtcdServer

	cfg   Config
	stopc chan struct{}
	errc  chan error

	closeOnce sync.Once
}

type peerListener struct {
	net.Listener
	serve func() error
	close func(context.Context) error
}

// StartEtcd launches the etcd server and HTTP handlers for client/server communication.
// The returned Etcd.Server is not guaranteed to have joined the cluster. Wait
// on the Etcd.Server.ReadyNotify() channel to know when it has completed and
// is ready for use.
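//
// A minimal startup sketch (the data directory and the 60-second startup
// timeout below are illustrative choices, not values taken from this file):
//
//	cfg := embed.NewConfig()
//	cfg.Dir = "default.etcd"
//	e, err := embed.StartEtcd(cfg)
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer e.Close()
//	select {
//	case <-e.Server.ReadyNotify():
//		// the member has joined the cluster and is ready to serve
//	case <-time.After(60 * time.Second):
//		e.Server.Stop() // trigger a shutdown if startup takes too long
//	}
//	log.Fatal(<-e.Err())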
func StartEtcd(inCfg *Config) (e *Etcd, err error) {
	if err = inCfg.Validate(); err != nil {
		return nil, err
	}
	serving := false
	e = &Etcd{cfg: *inCfg, stopc: make(chan struct{})}
	cfg := &e.cfg
	defer func() {
		if e == nil || err == nil {
			return
		}
		if !serving {
			// errored before starting gRPC server for serveCtx.serversC
			for _, sctx := range e.sctxs {
				close(sctx.serversC)
			}
		}
		e.Close()
		e = nil
	}()

	if e.cfg.logger != nil {
		e.cfg.logger.Info(
			"configuring peer listeners",
			zap.Strings("listen-peer-urls", e.cfg.getLPURLs()),
		)
	}
	if e.Peers, err = configurePeerListeners(cfg); err != nil {
		return e, err
	}

	if e.cfg.logger != nil {
		e.cfg.logger.Info(
			"configuring client listeners",
			zap.Strings("listen-client-urls", e.cfg.getLCURLs()),
		)
	}
	if e.sctxs, err = configureClientListeners(cfg); err != nil {
		return e, err
	}

	for _, sctx := range e.sctxs {
		e.Clients = append(e.Clients, sctx.l)
	}

	var (
		urlsmap types.URLsMap
		token   string
	)
	memberInitialized := true
	if !isMemberInitialized(cfg) {
		memberInitialized = false
		urlsmap, token, err = cfg.PeerURLsMapAndToken("etcd")
		if err != nil {
			return e, fmt.Errorf("error setting up initial cluster: %v", err)
		}
	}

	// AutoCompactionRetention defaults to "0" if not set.
	if len(cfg.AutoCompactionRetention) == 0 {
		cfg.AutoCompactionRetention = "0"
	}
	autoCompactionRetention, err := parseCompactionRetention(cfg.AutoCompactionMode, cfg.AutoCompactionRetention)
	if err != nil {
		return e, err
	}

	srvcfg := etcdserver.ServerConfig{
		Name:                       cfg.Name,
		ClientURLs:                 cfg.ACUrls,
		PeerURLs:                   cfg.APUrls,
		DataDir:                    cfg.Dir,
		DedicatedWALDir:            cfg.WalDir,
		SnapCount:                  cfg.SnapCount,
		MaxSnapFiles:               cfg.MaxSnapFiles,
		MaxWALFiles:                cfg.MaxWalFiles,
		InitialPeerURLsMap:         urlsmap,
		InitialClusterToken:        token,
		DiscoveryURL:               cfg.Durl,
		DiscoveryProxy:             cfg.Dproxy,
		NewCluster:                 cfg.IsNewCluster(),
		PeerTLSInfo:                cfg.PeerTLSInfo,
		TickMs:                     cfg.TickMs,
		ElectionTicks:              cfg.ElectionTicks(),
		InitialElectionTickAdvance: cfg.InitialElectionTickAdvance,
		AutoCompactionRetention:    autoCompactionRetention,
		AutoCompactionMode:         cfg.AutoCompactionMode,
		QuotaBackendBytes:          cfg.QuotaBackendBytes,
		MaxTxnOps:                  cfg.MaxTxnOps,
		MaxRequestBytes:            cfg.MaxRequestBytes,
		StrictReconfigCheck:        cfg.StrictReconfigCheck,
		ClientCertAuthEnabled:      cfg.ClientTLSInfo.ClientCertAuth,
		AuthToken:                  cfg.AuthToken,
		CORS:                       cfg.CORS,
		HostWhitelist:              cfg.HostWhitelist,
		InitialCorruptCheck:        cfg.ExperimentalInitialCorruptCheck,
		CorruptCheckTime:           cfg.ExperimentalCorruptCheckTime,
		PreVote:                    cfg.PreVote,
		Logger:                     cfg.logger,
		LoggerConfig:               cfg.loggerConfig,
		Debug:                      cfg.Debug,
		ForceNewCluster:            cfg.ForceNewCluster,
	}
	if e.Server, err = etcdserver.NewServer(srvcfg); err != nil {
		return e, err
	}

	if len(e.cfg.CORS) > 0 {
		ss := make([]string, 0, len(e.cfg.CORS))
		for v := range e.cfg.CORS {
			ss = append(ss, v)
		}
		sort.Strings(ss)
		if e.cfg.logger != nil {
			e.cfg.logger.Info("configured CORS", zap.Strings("cors", ss))
		} else {
			plog.Infof("%s starting with cors %q", e.Server.ID(), ss)
		}
	}
	if len(e.cfg.HostWhitelist) > 0 {
		ss := make([]string, 0, len(e.cfg.HostWhitelist))
		for v := range e.cfg.HostWhitelist {
			ss = append(ss, v)
		}
		sort.Strings(ss)
		if e.cfg.logger != nil {
			e.cfg.logger.Info("configured host whitelist", zap.Strings("hosts", ss))
		} else {
			plog.Infof("%s starting with host whitelist %q", e.Server.ID(), ss)
		}
	}

	// buffer channel so goroutines on closed connections won't wait forever
	e.errc = make(chan error, len(e.Peers)+len(e.Clients)+2*len(e.sctxs))

	// a newly started member ("memberInitialized==false") does not need the
	// corruption check
	if memberInitialized {
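		// CheckInitialHashKV compares this member's persisted KV hash with
		// its peers' hashes to detect data corruption before serving; it is
		// a no-op unless ExperimentalInitialCorruptCheck is enabled.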
		if err = e.Server.CheckInitialHashKV(); err != nil {
			// set "EtcdServer" to nil, so that it does not block on
			// "EtcdServer.Close()" (nothing to close since rafthttp
			// transports have not been started)
			e.Server = nil
			return e, err
		}
	}
	e.Server.Start()

	if err = e.servePeers(); err != nil {
		return e, err
	}
	if err = e.serveClients(); err != nil {
		return e, err
	}
	if err = e.serveMetrics(); err != nil {
		return e, err
	}

	if e.cfg.logger != nil {
		e.cfg.logger.Info(
			"now serving peer/client/metrics",
			zap.String("local-member-id", e.Server.ID().String()),
			zap.Strings("initial-advertise-peer-urls", e.cfg.getAPURLs()),
			zap.Strings("listen-peer-urls", e.cfg.getLPURLs()),
			zap.Strings("advertise-client-urls", e.cfg.getACURLs()),
			zap.Strings("listen-client-urls", e.cfg.getLCURLs()),
			zap.Strings("listen-metrics-urls", e.cfg.getMetricsURLs()),
		)
	}
	serving = true
	return e, nil
}

// Config returns the current configuration.
func (e *Etcd) Config() Config { return e.cfg }

// Close gracefully shuts down all servers/listeners.
// Client requests are terminated with the request timeout.
// After the timeout, any remaining requests are forcibly closed.
func (e *Etcd) Close() {
	e.closeOnce.Do(func() { close(e.stopc) })

	// close client requests with request timeout
	timeout := 2 * time.Second
	if e.Server != nil {
		timeout = e.Server.Cfg.ReqTimeout()
	}
	for _, sctx := range e.sctxs {
		for ss := range sctx.serversC {
			ctx, cancel := context.WithTimeout(context.Background(), timeout)
			stopServers(ctx, ss)
			cancel()
		}
	}

	for _, sctx := range e.sctxs {
		sctx.cancel()
	}

	for i := range e.Clients {
		if e.Clients[i] != nil {
			e.Clients[i].Close()
		}
	}

	for i := range e.metricsListeners {
		e.metricsListeners[i].Close()
	}

	// close rafthttp transports
	if e.Server != nil {
		e.Server.Stop()
	}

	// close all idle connections in peer handler (wait up to 1 second)
	for i := range e.Peers {
		if e.Peers[i] != nil && e.Peers[i].close != nil {
			ctx, cancel := context.WithTimeout(context.Background(), time.Second)
			e.Peers[i].close(ctx)
			cancel()
		}
	}

	lg := e.GetLogger()
	if lg != nil {
		lg.Sync()
	}
}

func stopServers(ctx context.Context, ss *servers) {
	shutdownNow := func() {
		// first, close the http.Server
		ss.http.Shutdown(ctx)
		// then close grpc.Server; this cancels all active RPCs
		ss.grpc.Stop()
	}

	// do not grpc.Server.GracefulStop on a TLS-enabled etcd server
	// See https://github.com/grpc/grpc-go/issues/1384#issuecomment-317124531
	// and https://github.com/coreos/etcd/issues/8916
	if ss.secure {
		shutdownNow()
		return
	}

	ch := make(chan struct{})
	go func() {
		defer close(ch)
		// close listeners to stop accepting new connections,
		// will block on any existing transports
		ss.grpc.GracefulStop()
	}()

	// wait until all pending RPCs are finished
	select {
	case <-ch:
	case <-ctx.Done():
		// took too long, manually close open transports
		// e.g. watch streams
		shutdownNow()

		// a concurrent GracefulStop should be interrupted
		<-ch
	}
}

func (e *Etcd) Err() <-chan error { return e.errc }

func configurePeerListeners(cfg *Config) (peers []*peerListener, err error) {
	if err = cfg.PeerSelfCert(); err != nil {
		if cfg.logger != nil {
			cfg.logger.Fatal("failed to get peer self-signed certs", zap.Error(err))
		} else {
			plog.Fatalf("could not get certs (%v)", err)
		}
	}
	if !cfg.PeerTLSInfo.Empty() {
		if cfg.logger != nil {
			cfg.logger.Info(
				"starting with peer TLS",
				zap.String("tls-info", fmt.Sprintf("%+v", cfg.PeerTLSInfo)),
			)
		} else {
			plog.Infof("peerTLS: %s", cfg.PeerTLSInfo)
		}
	}

	peers = make([]*peerListener, len(cfg.LPUrls))
	defer func() {
		if err == nil {
			return
		}
		for i := range peers {
			if peers[i] != nil && peers[i].close != nil {
				if cfg.logger != nil {
					cfg.logger.Warn(
						"closing peer listener",
						zap.String("address", cfg.LPUrls[i].String()),
						zap.Error(err),
					)
				} else {
					plog.Info("stopping listening for peers on ", cfg.LPUrls[i].String())
				}
				ctx, cancel := context.WithTimeout(context.Background(), time.Second)
				peers[i].close(ctx)
				cancel()
			}
		}
	}()

	for i, u := range cfg.LPUrls {
		if u.Scheme == "http" {
			if !cfg.PeerTLSInfo.Empty() {
				if cfg.logger != nil {
					cfg.logger.Warn("scheme is HTTP while key and cert files are present; ignoring key and cert files", zap.String("peer-url", u.String()))
				} else {
					plog.Warningf("The scheme of peer url %s is HTTP while peer key/cert files are presented. Ignored peer key/cert files.", u.String())
				}
			}
			if cfg.PeerTLSInfo.ClientCertAuth {
				if cfg.logger != nil {
					cfg.logger.Warn("scheme is HTTP while --peer-client-cert-auth is enabled; ignoring client cert auth for this URL", zap.String("peer-url", u.String()))
				} else {
					plog.Warningf("The scheme of peer url %s is HTTP while client cert auth (--peer-client-cert-auth) is enabled. Ignored client cert auth for this url.", u.String())
				}
			}
		}
		peers[i] = &peerListener{close: func(context.Context) error { return nil }}
		peers[i].Listener, err = rafthttp.NewListener(u, &cfg.PeerTLSInfo)
		if err != nil {
			return nil, err
		}
		// once serving, this is overwritten with 'http.Server.Shutdown'.
		// Capture the listener in a local variable: closing over the loop
		// variable 'i' would make every close func close the last listener.
		l := peers[i].Listener
		peers[i].close = func(context.Context) error { return l.Close() }
	}
	return peers, nil
}

// configure peer handlers after rafthttp.Transport is started
func (e *Etcd) servePeers() (err error) {
	ph := etcdhttp.NewPeerHandler(e.Server)
	var peerTLScfg *tls.Config
	if !e.cfg.PeerTLSInfo.Empty() {
		if peerTLScfg, err = e.cfg.PeerTLSInfo.ServerConfig(); err != nil {
			return err
		}
	}

	for _, p := range e.Peers {
		u := p.Listener.Addr().String()
		gs := v3rpc.Server(e.Server, peerTLScfg)
		m := cmux.New(p.Listener)
		go gs.Serve(m.Match(cmux.HTTP2()))
		srv := &http.Server{
			Handler:     grpcHandlerFunc(gs, ph),
			ReadTimeout: 5 * time.Minute,
			ErrorLog:    defaultLog.New(ioutil.Discard, "", 0), // do not log user errors
		}
		go srv.Serve(m.Match(cmux.Any()))
		p.serve = func() error { return m.Serve() }
		p.close = func(ctx context.Context) error {
			// gracefully shutdown http.Server:
			// close open listeners and idle connections
			// until context cancel or timeout
			if e.cfg.logger != nil {
				e.cfg.logger.Info(
					"stopping serving peer traffic",
					zap.String("address", u),
				)
			}
			stopServers(ctx, &servers{secure: peerTLScfg != nil, grpc: gs, http: srv})
			if e.cfg.logger != nil {
				e.cfg.logger.Info(
					"stopped serving peer traffic",
					zap.String("address", u),
				)
			}
			return nil
		}
	}

	// start peer servers in a goroutine
	for _, pl := range e.Peers {
		go func(l *peerListener) {
			u := l.Addr().String()
			if e.cfg.logger != nil {
				e.cfg.logger.Info(
					"serving peer traffic",
					zap.String("address", u),
				)
			} else {
				plog.Info("listening for peers on ", u)
			}
			e.errHandler(l.serve())
		}(pl)
	}
	return nil
}

func configureClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err error) {
	if err = cfg.ClientSelfCert(); err != nil {
		if cfg.logger != nil {
			cfg.logger.Fatal("failed to get client self-signed certs", zap.Error(err))
		} else {
			plog.Fatalf("could not get certs (%v)", err)
		}
	}
	if cfg.EnablePprof {
		if cfg.logger != nil {
			cfg.logger.Info("pprof is enabled", zap.String("path", debugutil.HTTPPrefixPProf))
		} else {
			plog.Infof("pprof is enabled under %s", debugutil.HTTPPrefixPProf)
		}
	}

	sctxs = make(map[string]*serveCtx)
	for _, u := range cfg.LCUrls {
		u := u // capture the loop variable for the deferred close below
		sctx := newServeCtx(cfg.logger)
		if u.Scheme == "http" || u.Scheme == "unix" {
			if !cfg.ClientTLSInfo.Empty() {
				if cfg.logger != nil {
					cfg.logger.Warn("scheme is HTTP while key and cert files are present; ignoring key and cert files", zap.String("client-url", u.String()))
				} else {
					plog.Warningf("The scheme of client url %s is HTTP while client key/cert files are presented. Ignored key/cert files.", u.String())
				}
			}
			if cfg.ClientTLSInfo.ClientCertAuth {
				if cfg.logger != nil {
					cfg.logger.Warn("scheme is HTTP while --client-cert-auth is enabled; ignoring client cert auth for this URL", zap.String("client-url", u.String()))
				} else {
					plog.Warningf("The scheme of client url %s is HTTP while client cert auth (--client-cert-auth) is enabled. Ignored client cert auth for this url.", u.String())
				}
			}
		}
		if (u.Scheme == "https" || u.Scheme == "unixs") && cfg.ClientTLSInfo.Empty() {
			return nil, fmt.Errorf("TLS key/cert (--cert-file, --key-file) must be provided for client url %s with HTTPS scheme", u.String())
		}

		network := "tcp"
		addr := u.Host
		if u.Scheme == "unix" || u.Scheme == "unixs" {
			network = "unix"
			addr = u.Host + u.Path
		}
		sctx.network = network

		sctx.secure = u.Scheme == "https" || u.Scheme == "unixs"
		sctx.insecure = !sctx.secure
		if oldctx := sctxs[addr]; oldctx != nil {
			oldctx.secure = oldctx.secure || sctx.secure
			oldctx.insecure = oldctx.insecure || sctx.insecure
			continue
		}

		if sctx.l, err = net.Listen(network, addr); err != nil {
			return nil, err
		}
		// net.Listener will rewrite ipv4 0.0.0.0 to ipv6 [::], breaking
		// hosts that disable ipv6. So, use the address given by the user.
		sctx.addr = addr

		if fdLimit, fderr := runtimeutil.FDLimit(); fderr == nil {
			if fdLimit <= reservedInternalFDNum {
				if cfg.logger != nil {
					cfg.logger.Fatal(
						"file descriptor limit of etcd process is too low; please set it higher",
						zap.Uint64("limit", fdLimit),
						zap.Int("recommended-limit", reservedInternalFDNum),
					)
				} else {
					plog.Fatalf("file descriptor limit[%d] of etcd process is too low, and should be set higher than %d to ensure internal usage", fdLimit, reservedInternalFDNum)
				}
			}
			sctx.l = transport.LimitListener(sctx.l, int(fdLimit-reservedInternalFDNum))
		}

		if network == "tcp" {
			if sctx.l, err = transport.NewKeepAliveListener(sctx.l, network, nil); err != nil {
				return nil, err
			}
		}

		defer func() {
			if err == nil {
				return
			}
			sctx.l.Close()
			if cfg.logger != nil {
				cfg.logger.Warn(
					"closing client listener",
					zap.String("address", u.Host),
					zap.Error(err),
				)
			} else {
				plog.Info("stopping listening for client requests on ", u.Host)
			}
		}()
		for k := range cfg.UserHandlers {
			sctx.userHandlers[k] = cfg.UserHandlers[k]
		}
		sctx.serviceRegister = cfg.ServiceRegister
		if cfg.EnablePprof || cfg.Debug {
			sctx.registerPprof()
		}
		if cfg.Debug {
			sctx.registerTrace()
		}
		sctxs[addr] = sctx
	}
	return sctxs, nil
}

func (e *Etcd) serveClients() (err error) {
	if !e.cfg.ClientTLSInfo.Empty() {
		if e.cfg.logger != nil {
			e.cfg.logger.Info(
				"starting with client TLS",
				zap.String("tls-info", fmt.Sprintf("%+v", e.cfg.ClientTLSInfo)),
			)
		} else {
			plog.Infof("ClientTLS: %s", e.cfg.ClientTLSInfo)
		}
	}

	// Start a client server goroutine for each listen address
	var h http.Handler
	if e.Config().EnableV2 {
		if len(e.Config().ExperimentalEnableV2V3) > 0 {
			srv := v2v3.NewServer(e.cfg.logger, v3client.New(e.Server), e.cfg.ExperimentalEnableV2V3)
			h = v2http.NewClientHandler(srv, e.Server.Cfg.ReqTimeout())
		} else {
			h = v2http.NewClientHandler(e.Server, e.Server.Cfg.ReqTimeout())
		}
	} else {
		mux := http.NewServeMux()
		etcdhttp.HandleBasic(mux, e.Server)
		h = mux
	}

	gopts := []grpc.ServerOption{}
	if e.cfg.GRPCKeepAliveMinTime > time.Duration(0) {
		gopts = append(gopts, grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
			MinTime:             e.cfg.GRPCKeepAliveMinTime,
			PermitWithoutStream: false,
		}))
	}
	if e.cfg.GRPCKeepAliveInterval > time.Duration(0) && e.cfg.GRPCKeepAliveTimeout > time.Duration(0) {
		gopts = append(gopts, grpc.KeepaliveParams(keepalive.ServerParameters{
			Time:    e.cfg.GRPCKeepAliveInterval,
			Timeout: e.cfg.GRPCKeepAliveTimeout,
		}))
	}

	// start client servers in a goroutine
	for _, sctx := range e.sctxs {
		go func(s *serveCtx) {
			e.errHandler(s.serve(e.Server, &e.cfg.ClientTLSInfo, h, e.errHandler, gopts...))
		}(sctx)
	}
	return nil
}
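
// serveMetrics exposes the Prometheus /metrics and the /health endpoints on
// every URL in --listen-metrics-urls, reusing the client TLS configuration
// for HTTPS URLs. A scrape sketch, assuming a hypothetical listener on
// http://127.0.0.1:2381:
//
//	curl -L http://127.0.0.1:2381/metrics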
func (e *Etcd) serveMetrics() (err error) {
	if e.cfg.Metrics == "extensive" {
		grpc_prometheus.EnableHandlingTimeHistogram()
	}

	if len(e.cfg.ListenMetricsUrls) > 0 {
		metricsMux := http.NewServeMux()
		etcdhttp.HandleMetricsHealth(metricsMux, e.Server)

		for _, murl := range e.cfg.ListenMetricsUrls {
			tlsInfo := &e.cfg.ClientTLSInfo
			if murl.Scheme == "http" {
				tlsInfo = nil
			}
			ml, err := transport.NewListener(murl.Host, murl.Scheme, tlsInfo)
			if err != nil {
				return err
			}
			e.metricsListeners = append(e.metricsListeners, ml)
			go func(u url.URL, ln net.Listener) {
				if e.cfg.logger != nil {
					e.cfg.logger.Info(
						"serving metrics",
						zap.String("address", u.String()),
					)
				} else {
					plog.Info("listening for metrics on ", u.String())
				}
				e.errHandler(http.Serve(ln, metricsMux))
			}(murl, ml)
		}
	}
	return nil
}

func (e *Etcd) errHandler(err error) {
	select {
	case <-e.stopc:
		return
	default:
	}
	select {
	case <-e.stopc:
	case e.errc <- err:
	}
}

// GetLogger returns the logger.
func (e *Etcd) GetLogger() *zap.Logger {
	e.cfg.loggerMu.RLock()
	l := e.cfg.logger
	e.cfg.loggerMu.RUnlock()
	return l
}

func parseCompactionRetention(mode, retention string) (ret time.Duration, err error) {
	h, err := strconv.Atoi(retention)
	if err == nil {
		switch mode {
		case CompactorModeRevision:
			ret = time.Duration(int64(h))
		case CompactorModePeriodic:
			ret = time.Duration(int64(h)) * time.Hour
		}
	} else {
		// periodic compaction
		ret, err = time.ParseDuration(retention)
		if err != nil {
			return 0, fmt.Errorf("error parsing CompactionRetention: %v", err)
		}
	}
	return ret, nil
}
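
// A usage sketch for parseCompactionRetention; the inputs below are
// illustrative examples, not configuration defaults:
//
//	d, _ := parseCompactionRetention(CompactorModePeriodic, "5")   // 5 * time.Hour (bare integers are hours in periodic mode)
//	d, _ = parseCompactionRetention(CompactorModePeriodic, "90m")  // 90 * time.Minute, via time.ParseDuration
//	d, _ = parseCompactionRetention(CompactorModeRevision, "1000") // time.Duration(1000): a revision count, not a wall-clock duration
//	_ = d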