Merge pull request #9572 from gyuho/eee

functional: run tests with embedded etcd, add logger field
release-3.4
Gyuho Lee 2018-04-16 19:09:45 -07:00 committed by GitHub
commit 46e19d21a1
121 changed files with 5101 additions and 1478 deletions

View File

@ -33,7 +33,7 @@ See [code changes](https://github.com/coreos/etcd/compare/v3.3.0...v3.4.0) and [
- Make etcd server return `raft.ErrProposalDropped` on internal Raft proposal drop in [v3 applier](https://github.com/coreos/etcd/pull/9549) and [v2 applier](https://github.com/coreos/etcd/pull/9558).
- e.g. a node is removed from cluster, or [`raftpb.MsgProp` arrives at current leader while there is an ongoing leadership transfer](https://github.com/coreos/etcd/issues/8975).
- Add [`snapshot`](https://github.com/coreos/etcd/pull/9118) package for easier snapshot workflow (see [`godoc.org/github.com/coreos/etcd/snapshot`](https://godoc.org/github.com/coreos/etcd/snapshot) for more; a usage sketch follows this list).
- Improve [functional tester](https://github.com/coreos/etcd/tree/master/functional) coverage: [proxy layer to run network fault tests in CI](https://github.com/coreos/etcd/pull/9081), [TLS enabled for both server and client](https://github.com/coreos/etcd/pull/9534), [liveness mode](https://github.com/coreos/etcd/issues/9230), [shuffled test sequence](https://github.com/coreos/etcd/issues/9381), [membership reconfiguration failure cases](https://github.com/coreos/etcd/pull/9564), [disastrous quorum loss and snapshot recovery from a seed member](https://github.com/coreos/etcd/pull/9565).
- Improve [functional tester](https://github.com/coreos/etcd/tree/master/functional) coverage: [proxy layer to run network fault tests in CI](https://github.com/coreos/etcd/pull/9081), [TLS enabled for both server and client](https://github.com/coreos/etcd/pull/9534), [liveness mode](https://github.com/coreos/etcd/issues/9230), [shuffled test sequence](https://github.com/coreos/etcd/issues/9381), [membership reconfiguration failure cases](https://github.com/coreos/etcd/pull/9564), [disastrous quorum loss and snapshot recovery from a seed member](https://github.com/coreos/etcd/pull/9565), [embedded etcd](https://github.com/coreos/etcd/pull/9572).
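The new `snapshot` package wraps save, status, and restore behind a single manager. A minimal sketch, assuming the `NewV3`/`Status` calls shown in this commit's `etcdctl` changes; `Save`'s exact signature and the endpoint/file path are assumptions:

```go
package main

import (
	"context"
	"log"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/snapshot"
	"go.uber.org/zap"
)

func main() {
	lg, _ := zap.NewProduction()
	sp := snapshot.NewV3(lg) // the manager takes a *zap.Logger, as in snapshot_command.go below

	// Save a snapshot from a live member (endpoint is a placeholder;
	// Save's signature is inferred from the save command's use of this manager).
	ccfg := clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}}
	if err := sp.Save(context.Background(), ccfg, "snapshot.db"); err != nil {
		log.Fatal(err)
	}

	// Inspect the saved snapshot file.
	ds, err := sp.Status("snapshot.db")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("snapshot status: %+v", ds)
}
```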
### Breaking Changes
@ -55,11 +55,6 @@ See [code changes](https://github.com/coreos/etcd/compare/v3.3.0...v3.4.0) and [
- e.g. exit with error on `ETCDCTL_ENDPOINTS=abc.com ETCDCTL_API=3 etcdctl endpoint health --endpoints=def.com`.
- Change [`etcdserverpb.AuthRoleRevokePermissionRequest/key,range_end` fields type from `string` to `bytes`](https://github.com/coreos/etcd/pull/9433).
- Change [`embed.Config.CorsInfo` in `*cors.CORSInfo` type to `embed.Config.CORS` in `map[string]struct{}` type](https://github.com/coreos/etcd/pull/9490).
- Remove [`pkg/cors` package](https://github.com/coreos/etcd/pull/9490).
- Move `"github.com/coreos/etcd/snap"` to [`"github.com/coreos/etcd/raftsnap"`](https://github.com/coreos/etcd/pull/9211).
- Move `"github.com/coreos/etcd/etcdserver/auth"` to [`"github.com/coreos/etcd/etcdserver/v2auth"`](https://github.com/coreos/etcd/pull/9275).
- Move `"github.com/coreos/etcd/error"` to [`"github.com/coreos/etcd/etcdserver/v2error"`](https://github.com/coreos/etcd/pull/9274).
- Move `"github.com/coreos/etcd/store"` to [`"github.com/coreos/etcd/etcdserver/v2store"`](https://github.com/coreos/etcd/pull/9274).
- Change v3 `etcdctl snapshot` exit codes with [`snapshot` package](https://github.com/coreos/etcd/pull/9118/commits/df689f4280e1cce4b9d61300be13ca604d41670a).
- Exit on error with exit code 1 (no more exit code 5 or 6 on `snapshot save/restore` commands).
- Migrate dependency management tool from `glide` to [`golang/dep`](https://github.com/coreos/etcd/pull/9155).
@ -69,12 +64,25 @@ See [code changes](https://github.com/coreos/etcd/compare/v3.3.0...v3.4.0) and [
- Now `go get/install/build` on `etcd` packages (e.g. `clientv3`, `tools/benchmark`) enforce builds with etcd `vendor` directory.
- Replace [gRPC gateway](https://github.com/grpc-ecosystem/grpc-gateway) endpoint `/v3beta` with [`/v3`](https://github.com/coreos/etcd/pull/9298).
- Deprecated [`/v3alpha`](https://github.com/coreos/etcd/pull/9298).
- Change [`wal` package function signatures](https://github.com/coreos/etcd/pull/9572) to support [structured logger and logging to file](https://github.com/coreos/etcd/issues/9438) on the server side (a sketch follows this list).
- Previously, `Open(dirpath string, snap walpb.Snapshot) (*WAL, error)`, now `Open(lg *zap.Logger, dirpath string, snap walpb.Snapshot) (*WAL, error)`.
- Previously, `OpenForRead(dirpath string, snap walpb.Snapshot) (*WAL, error)`, now `OpenForRead(lg *zap.Logger, dirpath string, snap walpb.Snapshot) (*WAL, error)`.
- Previously, `Repair(dirpath string) bool`, now `Repair(lg *zap.Logger, dirpath string) bool`.
- Previously, `Create(dirpath string, metadata []byte) (*WAL, error)`, now `Create(lg *zap.Logger, dirpath string, metadata []byte) (*WAL, error)`.
- Remove [`embed.Config.SetupLogging`](https://github.com/coreos/etcd/pull/9572).
- The logger is now set up automatically based on the [`embed.Config.Logger`, `embed.Config.LogOutput`, and `embed.Config.Debug` fields](https://github.com/coreos/etcd/pull/9572).
- Remove [`pkg/cors` package](https://github.com/coreos/etcd/pull/9490).
- Move `"github.com/coreos/etcd/snap"` to [`"github.com/coreos/etcd/raftsnap"`](https://github.com/coreos/etcd/pull/9211).
- Move `"github.com/coreos/etcd/etcdserver/auth"` to [`"github.com/coreos/etcd/etcdserver/v2auth"`](https://github.com/coreos/etcd/pull/9275).
- Move `"github.com/coreos/etcd/error"` to [`"github.com/coreos/etcd/etcdserver/v2error"`](https://github.com/coreos/etcd/pull/9274).
- Move `"github.com/coreos/etcd/store"` to [`"github.com/coreos/etcd/etcdserver/v2store"`](https://github.com/coreos/etcd/pull/9274).
### Dependency
- Upgrade [`google.golang.org/grpc`](https://github.com/grpc/grpc-go/releases) from [**`v1.7.5`**](https://github.com/grpc/grpc-go/releases/tag/v1.7.5) to [**`v1.11.1`**](TODO).
- Upgrade [`github.com/ugorji/go/codec`](https://github.com/ugorji/go) to [**`v1.1.1`**](https://github.com/ugorji/go/releases/tag/v1.1.1), and [regenerate v2 `client`](https://github.com/coreos/etcd/pull/9494).
- Upgrade [`github.com/soheilhy/cmux`](https://github.com/soheilhy/cmux/releases) from [**`v0.1.3`**](https://github.com/soheilhy/cmux/releases/tag/v0.1.3) to [**`v0.1.4`**](https://github.com/soheilhy/cmux/releases/tag/v0.1.4).
- Upgrade [`github.com/google/btree`](https://github.com/google/btree/releases) from [**`google/btree@925471ac9`**](https://github.com/google/btree/commit/925471ac9e2131377a91e1595defec898166fe49) to [**`google/btree@e89373fe6`**](https://github.com/google/btree/commit/e89373fe6b4a7413d7acd6da1725b83ef713e6e4).
- Upgrade [`github.com/spf13/cobra`](https://github.com/spf13/cobra/releases) from [**`spf13/cobra@1c44ec8d3`**](https://github.com/spf13/cobra/commit/1c44ec8d3f1552cac48999f9306da23c4d8a288b) to [**`spf13/cobra@cd30c2a7e`**](https://github.com/spf13/cobra/commit/cd30c2a7e91a1d63fd9a0027accf18a681e9d50b).
- Upgrade [`github.com/spf13/pflag`](https://github.com/spf13/pflag/releases) from [**`v1.0.0`**](https://github.com/spf13/pflag/releases/tag/v1.0.0) to [**`spf13/pflag@1ce0cc6db`**](https://github.com/spf13/pflag/commit/1ce0cc6db4029d97571db82f85092fccedb572ce).
@ -129,11 +137,18 @@ See [security doc](https://github.com/coreos/etcd/blob/master/Documentation/op-g
- If `--discovery-srv-name="foo"`, then query `_etcd-server-ssl-foo._tcp.[YOUR_HOST]` and `_etcd-server-foo._tcp.[YOUR_HOST]`.
- Useful for operating multiple etcd clusters under the same domain.
- Support [`etcd --cors`](https://github.com/coreos/etcd/pull/9490) in v3 HTTP requests (gRPC gateway).
- Add [`--logger`](https://github.com/coreos/etcd/pull/9572) flag to support [structured logging and logging to a file](https://github.com/coreos/etcd/issues/9438) on the server side (see the `embed.Config` sketch after this list).
- e.g. `--logger=capnslog --log-output=default` is the default setting and matches the previous etcd server logging format.
- TODO: `--logger=zap` is experimental, and journald logging may not work when etcd runs as PID 1.
- e.g. `--logger=zap --log-output=/tmp/test.log` will log server operations in [JSON-encoded format](TODO) and write logs to the specified file `/tmp/test.log`.
- e.g. `--logger=zap --log-output=default` will log server operations in [JSON-encoded format](TODO) and write logs to `os.Stderr` (systemd journald detection: TODO).
- e.g. `--logger=zap --log-output=stderr` will log server operations in [JSON-encoded format](TODO) and write logs to `os.Stderr` (bypassing journald: TODO).
- e.g. `--logger=zap --log-output=stdout` will log server operations in [JSON-encoded format](TODO) and write logs to `os.Stdout` (bypassing journald: TODO).
- e.g. `--logger=zap --log-output=discard` will discard all server logs.
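These flag combinations map onto the new `embed.Config` fields; a hedged Go sketch of the zap-to-file setup (field names are from this commit's `embed/config.go`; the package name and log path are placeholders):

```go
package etcdcfg

import "github.com/coreos/etcd/embed"

func newZapFileConfig() (*embed.Config, error) {
	cfg := embed.NewConfig()
	cfg.Logger = "zap"              // "capnslog" remains the default in this release
	cfg.LogOutput = "/tmp/test.log" // or "default", "stderr", "stdout", "discard"
	cfg.Debug = false               // true lowers the zap level to debug

	// Validate calls the unexported setupLogging; SetupLogging no longer exists.
	if err := cfg.Validate(); err != nil {
		return nil, err
	}
	return cfg, nil
}
```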
### Added: `embed`
- Add [`embed.Config.Logger`](https://github.com/coreos/etcd/pull/9518) to use [structured logger `zap`](https://github.com/uber-go/zap) on the server side.
- make this configurable...
- Add [`embed.Config.Logger`](https://github.com/coreos/etcd/pull/9518) to support [structured logger `zap`](https://github.com/uber-go/zap) on the server side.
- Define [`embed.CompactorModePeriodic`](https://godoc.org/github.com/coreos/etcd/embed#pkg-variables) for `compactor.ModePeriodic`.
- Define [`embed.CompactorModeRevision`](https://godoc.org/github.com/coreos/etcd/embed#pkg-variables) for `compactor.ModeRevision`.
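For the new compactor constants, a short sketch; the retention value is a placeholder, and the constants alias `compactor.ModePeriodic`/`compactor.ModeRevision` per the godoc links above:

```go
package etcdcfg

import "github.com/coreos/etcd/embed"

func withPeriodicCompaction() *embed.Config {
	cfg := embed.NewConfig()
	cfg.AutoCompactionMode = embed.CompactorModePeriodic // or embed.CompactorModeRevision
	cfg.AutoCompactionRetention = "1h"                   // plain "5" means 5 hours in periodic mode
	return cfg
}
```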

View File

@ -26,7 +26,7 @@ Highlighted breaking changes in 3.4.
+etcd --peer-trusted-ca-file ca-peer.crt
```
#### Change in ``pkg/transport`
#### Change in `pkg/transport`
Deprecated `pkg/transport.TLSInfo.CAFile` field.
@ -45,6 +45,40 @@ if err != nil {
}
```
#### Change in `wal`
Changed `wal` function signatures to support a structured logger.
```diff
import "github.com/coreos/etcd/wal"
+import "go.uber.org/zap"
+lg, _ := zap.NewProduction()
-wal.Open(dirpath, snap)
+wal.Open(lg, dirpath, snap)
-wal.OpenForRead(dirpath, snap)
+wal.OpenForRead(lg, dirpath, snap)
-wal.Repair(dirpath)
+wal.Repair(lg, dirpath)
-wal.Create(dirpath, metadata)
+wal.Create(lg, dirpath, metadata)
```
#### Change in `embed.Etcd`
`embed.Config.SetupLogging` has been removed in order to prevent misconfigured logging; logging is now set up automatically.
```diff
import "github.com/coreos/etcd/embed"
cfg := &embed.Config{Debug: false}
-cfg.SetupLogging()
```
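A sketch of the replacement flow: configure the logging fields and start the server; logging is set up during config validation, and `(*Etcd).GetLogger` (added in this commit) exposes the resulting `*zap.Logger`. The data directory is a placeholder:

```go
package main

import (
	"log"

	"github.com/coreos/etcd/embed"
)

func main() {
	cfg := embed.NewConfig()
	cfg.Dir = "default.etcd" // placeholder data directory
	cfg.Logger = "zap"
	cfg.Debug = false
	// No cfg.SetupLogging() here; StartEtcd validates the config,
	// which builds the logger from Logger/LogOutput/Debug.

	e, err := embed.StartEtcd(cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer e.Close()

	e.GetLogger().Info("embedded etcd is running")
}
```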
### Server upgrade checklists
#### Upgrade requirements

View File

@ -26,6 +26,7 @@ clean:
rm -rf ./gopath
rm -rf ./gopath.proto
rm -rf ./release
rm -f ./snapshot/localhost:*
rm -f ./integration/127.0.0.1:* ./integration/localhost:*
rm -f ./clientv3/integration/127.0.0.1:* ./clientv3/integration/localhost:*
rm -f ./clientv3/ordering/127.0.0.1:* ./clientv3/ordering/localhost:*

View File

@ -30,6 +30,7 @@ import (
"github.com/coreos/etcd/mvcc/backend"
"github.com/coreos/pkg/capnslog"
"go.uber.org/zap"
"golang.org/x/crypto/bcrypt"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/metadata"
@ -1047,7 +1048,7 @@ func decomposeOpts(optstr string) (string, map[string]string, error) {
}
func NewTokenProvider(tokenOpts string, indexWaiter func(uint64) <-chan struct{}) (TokenProvider, error) {
func NewTokenProvider(lg *zap.Logger, tokenOpts string, indexWaiter func(uint64) <-chan struct{}) (TokenProvider, error) {
tokenType, typeSpecificOpts, err := decomposeOpts(tokenOpts)
if err != nil {
return nil, ErrInvalidAuthOpts
@ -1055,14 +1056,22 @@ func NewTokenProvider(tokenOpts string, indexWaiter func(uint64) <-chan struct{}
switch tokenType {
case "simple":
plog.Warningf("simple token is not cryptographically signed")
if lg != nil {
lg.Warn("simple token is not cryptographically signed")
} else {
plog.Warningf("simple token is not cryptographically signed")
}
return newTokenProviderSimple(indexWaiter), nil
case "jwt":
return newTokenProviderJWT(typeSpecificOpts)
case "":
return newTokenProviderNop()
default:
plog.Errorf("unknown token type: %s", tokenType)
if lg != nil {
lg.Warn("unknown token type", zap.String("type", tokenType), zap.Error(ErrInvalidAuthOpts))
} else {
plog.Errorf("unknown token type: %s", tokenType)
}
return nil, ErrInvalidAuthOpts
}
}

View File

@ -29,6 +29,7 @@ import (
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/mvcc/backend"
"go.uber.org/zap"
"golang.org/x/crypto/bcrypt"
"google.golang.org/grpc/metadata"
)
@ -49,7 +50,7 @@ func TestNewAuthStoreRevision(t *testing.T) {
b, tPath := backend.NewDefaultTmpBackend()
defer os.Remove(tPath)
tp, err := NewTokenProvider("simple", dummyIndexWaiter)
tp, err := NewTokenProvider(zap.NewExample(), "simple", dummyIndexWaiter)
if err != nil {
t.Fatal(err)
}
@ -77,7 +78,7 @@ func TestNewAuthStoreRevision(t *testing.T) {
func setupAuthStore(t *testing.T) (store *authStore, teardownfunc func(t *testing.T)) {
b, tPath := backend.NewDefaultTmpBackend()
tp, err := NewTokenProvider("simple", dummyIndexWaiter)
tp, err := NewTokenProvider(zap.NewExample(), "simple", dummyIndexWaiter)
if err != nil {
t.Fatal(err)
}
@ -514,7 +515,7 @@ func TestAuthInfoFromCtxRace(t *testing.T) {
b, tPath := backend.NewDefaultTmpBackend()
defer os.Remove(tPath)
tp, err := NewTokenProvider("simple", dummyIndexWaiter)
tp, err := NewTokenProvider(zap.NewExample(), "simple", dummyIndexWaiter)
if err != nil {
t.Fatal(err)
}
@ -580,7 +581,7 @@ func TestRecoverFromSnapshot(t *testing.T) {
as.Close()
tp, err := NewTokenProvider("simple", dummyIndexWaiter)
tp, err := NewTokenProvider(zap.NewExample(), "simple", dummyIndexWaiter)
if err != nil {
t.Fatal(err)
}
@ -662,7 +663,7 @@ func TestRolesOrder(t *testing.T) {
b, tPath := backend.NewDefaultTmpBackend()
defer os.Remove(tPath)
tp, err := NewTokenProvider("simple", dummyIndexWaiter)
tp, err := NewTokenProvider(zap.NewExample(), "simple", dummyIndexWaiter)
if err != nil {
t.Fatal(err)
}
@ -708,7 +709,7 @@ func TestAuthInfoFromCtxWithRoot(t *testing.T) {
b, tPath := backend.NewDefaultTmpBackend()
defer os.Remove(tPath)
tp, err := NewTokenProvider("simple", dummyIndexWaiter)
tp, err := NewTokenProvider(zap.NewExample(), "simple", dummyIndexWaiter)
if err != nil {
t.Fatal(err)
}

View File

@ -24,6 +24,8 @@ import (
"testing"
"time"
"go.uber.org/zap"
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
"github.com/coreos/etcd/integration"
"github.com/coreos/etcd/lease"
@ -145,7 +147,7 @@ func TestMaintenanceSnapshotErrorInflight(t *testing.T) {
clus.Members[0].Stop(t)
dpath := filepath.Join(clus.Members[0].DataDir, "member", "snap", "db")
b := backend.NewDefaultBackend(dpath)
s := mvcc.NewStore(b, &lease.FakeLessor{}, nil)
s := mvcc.NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
rev := 100000
for i := 2; i <= rev; i++ {
s.Put([]byte(fmt.Sprintf("%10d", i)), bytes.Repeat([]byte("a"), 1024), lease.NoLease)

View File

@ -33,6 +33,8 @@ import (
"github.com/coreos/etcd/raftsnap"
"github.com/coreos/etcd/wal"
"github.com/coreos/etcd/wal/walpb"
"go.uber.org/zap"
)
// A key-value stream backed by raft
@ -201,7 +203,7 @@ func (rc *raftNode) openWAL(snapshot *raftpb.Snapshot) *wal.WAL {
log.Fatalf("raftexample: cannot create dir for wal (%v)", err)
}
w, err := wal.Create(rc.waldir, nil)
w, err := wal.Create(zap.NewExample(), rc.waldir, nil)
if err != nil {
log.Fatalf("raftexample: create wal error (%v)", err)
}
@ -213,7 +215,7 @@ func (rc *raftNode) openWAL(snapshot *raftpb.Snapshot) *wal.WAL {
walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
}
log.Printf("loading WAL at term %d and index %d", walsnap.Term, walsnap.Index)
w, err := wal.Open(rc.waldir, walsnap)
w, err := wal.Open(zap.NewExample(), rc.waldir, walsnap)
if err != nil {
log.Fatalf("raftexample: error loading wal (%v)", err)
}
@ -261,7 +263,7 @@ func (rc *raftNode) startRaft() {
log.Fatalf("raftexample: cannot create dir for snapshot (%v)", err)
}
}
rc.snapshotter = raftsnap.New(rc.snapdir)
rc.snapshotter = raftsnap.New(zap.NewExample(), rc.snapdir)
rc.snapshotterReady <- rc.snapshotter
oldwal := wal.Exist(rc.waldir)
@ -291,6 +293,7 @@ func (rc *raftNode) startRaft() {
}
rc.transport = &rafthttp.Transport{
Logger: zap.NewExample(),
ID: types.ID(rc.id),
ClusterID: 0x1000,
Raft: rc,

View File

@ -24,11 +24,14 @@ import (
"os"
"path/filepath"
"strings"
"sync"
"syscall"
"time"
"github.com/coreos/etcd/compactor"
"github.com/coreos/etcd/etcdserver"
"github.com/coreos/etcd/pkg/flags"
"github.com/coreos/etcd/pkg/logutil"
"github.com/coreos/etcd/pkg/netutil"
"github.com/coreos/etcd/pkg/srv"
"github.com/coreos/etcd/pkg/transport"
@ -107,21 +110,12 @@ func init() {
// Config holds the arguments for configuring an etcd server.
type Config struct {
LPUrls, LCUrls []url.URL
Dir string `json:"data-dir"`
WalDir string `json:"wal-dir"`
MaxSnapFiles uint `json:"max-snapshots"`
MaxWalFiles uint `json:"max-wals"`
Name string `json:"name"`
SnapCount uint64 `json:"snapshot-count"`
// AutoCompactionMode is either 'periodic' or 'revision'.
AutoCompactionMode string `json:"auto-compaction-mode"`
// AutoCompactionRetention is either duration string with time unit
// (e.g. '5m' for 5-minute), or revision unit (e.g. '5000').
// If no time unit is provided and compaction mode is 'periodic',
// the unit defaults to hour. For example, '5' translates into 5-hour.
AutoCompactionRetention string `json:"auto-compaction-retention"`
Name string `json:"name"`
Dir string `json:"data-dir"`
WalDir string `json:"wal-dir"`
SnapCount uint64 `json:"snapshot-count"`
MaxSnapFiles uint `json:"max-snapshots"`
MaxWalFiles uint `json:"max-wals"`
// TickMs is the number of milliseconds between heartbeat ticks.
// TODO: decouple tickMs and heartbeat tick (current heartbeat tick = 1).
@ -132,6 +126,31 @@ type Config struct {
MaxTxnOps uint `json:"max-txn-ops"`
MaxRequestBytes uint `json:"max-request-bytes"`
LPUrls, LCUrls []url.URL
APUrls, ACUrls []url.URL
ClientTLSInfo transport.TLSInfo
ClientAutoTLS bool
PeerTLSInfo transport.TLSInfo
PeerAutoTLS bool
ClusterState string `json:"initial-cluster-state"`
DNSCluster string `json:"discovery-srv"`
DNSClusterServiceName string `json:"discovery-srv-name"`
Dproxy string `json:"discovery-proxy"`
Durl string `json:"discovery"`
InitialCluster string `json:"initial-cluster"`
InitialClusterToken string `json:"initial-cluster-token"`
StrictReconfigCheck bool `json:"strict-reconfig-check"`
EnableV2 bool `json:"enable-v2"`
// AutoCompactionMode is either 'periodic' or 'revision'.
AutoCompactionMode string `json:"auto-compaction-mode"`
// AutoCompactionRetention is either duration string with time unit
// (e.g. '5m' for 5-minute), or revision unit (e.g. '5000').
// If no time unit is provided and compaction mode is 'periodic',
// the unit defaults to hour. For example, '5' translates into 5-hour.
AutoCompactionRetention string `json:"auto-compaction-retention"`
// GRPCKeepAliveMinTime is the minimum interval that a client should
// wait before pinging server. When client pings "too fast", server
// sends goaway and closes the connection (errors: too_many_pings,
@ -147,17 +166,6 @@ type Config struct {
// before closing a non-responsive connection. 0 to disable.
GRPCKeepAliveTimeout time.Duration `json:"grpc-keepalive-timeout"`
APUrls, ACUrls []url.URL
ClusterState string `json:"initial-cluster-state"`
DNSCluster string `json:"discovery-srv"`
DNSClusterServiceName string `json:"discovery-srv-name"`
Dproxy string `json:"discovery-proxy"`
Durl string `json:"discovery"`
InitialCluster string `json:"initial-cluster"`
InitialClusterToken string `json:"initial-cluster-token"`
StrictReconfigCheck bool `json:"strict-reconfig-check"`
EnableV2 bool `json:"enable-v2"`
// PreVote is true to enable Raft Pre-Vote.
// If enabled, Raft runs an additional election phase
// to check whether it would get enough votes to win
@ -165,11 +173,6 @@ type Config struct {
// TODO: enable by default in 3.5.
PreVote bool `json:"pre-vote"`
ClientTLSInfo transport.TLSInfo
ClientAutoTLS bool
PeerTLSInfo transport.TLSInfo
PeerAutoTLS bool
CORS map[string]struct{}
// HostWhitelist lists acceptable hostnames from HTTP client requests.
@ -198,21 +201,6 @@ type Config struct {
// - https://github.com/coreos/etcd/issues/9353
HostWhitelist map[string]struct{}
// Logger logs server-side operations.
// If nil, all logs are discarded.
// TODO: make it configurable with existing logger.
// Currently, only logs TLS transport.
Logger *zap.Logger
Debug bool `json:"debug"`
LogPkgLevels string `json:"log-package-levels"`
LogOutput string `json:"log-output"`
EnablePprof bool `json:"enable-pprof"`
Metrics string `json:"metrics"`
ListenMetricsUrls []url.URL
ListenMetricsUrlsJSON string `json:"listen-metrics-urls"`
// UserHandlers is for registering users handlers and only used for
// embedding etcd into other applications.
// The map key is the route path for the handler, and
@ -235,6 +223,36 @@ type Config struct {
// ForceNewCluster starts a new cluster even if previously started; unsafe.
ForceNewCluster bool `json:"force-new-cluster"`
EnablePprof bool `json:"enable-pprof"`
Metrics string `json:"metrics"`
ListenMetricsUrls []url.URL
ListenMetricsUrlsJSON string `json:"listen-metrics-urls"`
// logger logs server-side operations. The default is nil,
// and "setupLogging" must be called before starting server.
// Do not set logger directly.
loggerMu *sync.RWMutex
logger *zap.Logger
loggerConfig zap.Config
// Logger is logger options: "zap", "capnslog".
// WARN: "capnslog" is being deprecated in v3.5.
Logger string `json:"logger"`
// LogOutput is either:
// - "default" as os.Stderr
// - "stderr" as os.Stderr
// - "stdout" as os.Stdout
// - file path to append server logs to
LogOutput string `json:"log-output"`
// Debug is true to enable debug-level logging.
Debug bool `json:"debug"`
// LogPkgLevels is being deprecated in v3.5.
// Only valid if "logger" option is "capnslog".
// WARN: DO NOT USE THIS!
LogPkgLevels string `json:"log-package-levels"`
}
// configYAML holds the config suitable for yaml parsing
@ -271,7 +289,6 @@ func NewConfig() *Config {
apurl, _ := url.Parse(DefaultInitialAdvertisePeerURLs)
lcurl, _ := url.Parse(DefaultListenClientURLs)
acurl, _ := url.Parse(DefaultAdvertiseClientURLs)
lg, _ := zap.NewProduction()
cfg := &Config{
MaxSnapFiles: DefaultMaxSnapshots,
MaxWalFiles: DefaultMaxWALs,
@ -291,14 +308,19 @@ func NewConfig() *Config {
ClusterState: ClusterStateFlagNew,
InitialClusterToken: "etcd-cluster",
StrictReconfigCheck: DefaultStrictReconfigCheck,
Logger: lg,
LogOutput: DefaultLogOutput,
Metrics: "basic",
EnableV2: DefaultEnableV2,
CORS: map[string]struct{}{"*": {}},
HostWhitelist: map[string]struct{}{"*": {}},
AuthToken: "simple",
PreVote: false, // TODO: enable by default in v3.5
loggerMu: new(sync.RWMutex),
logger: nil,
Logger: "capnslog",
LogOutput: DefaultLogOutput,
Debug: false,
LogPkgLevels: "",
}
cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name)
return cfg
@ -317,45 +339,171 @@ func logTLSHandshakeFailure(conn *tls.Conn, err error) {
}
}
// SetupLogging initializes etcd logging.
// Must be called after flag parsing.
func (cfg *Config) SetupLogging() {
cfg.ClientTLSInfo.HandshakeFailure = logTLSHandshakeFailure
cfg.PeerTLSInfo.HandshakeFailure = logTLSHandshakeFailure
// GetLogger returns the logger.
func (cfg Config) GetLogger() *zap.Logger {
cfg.loggerMu.RLock()
l := cfg.logger
cfg.loggerMu.RUnlock()
return l
}
capnslog.SetGlobalLogLevel(capnslog.INFO)
if cfg.Debug {
cfg.Logger = zap.NewExample()
capnslog.SetGlobalLogLevel(capnslog.DEBUG)
grpc.EnableTracing = true
// enable info, warning, error
grpclog.SetLoggerV2(grpclog.NewLoggerV2(os.Stderr, os.Stderr, os.Stderr))
} else {
// only discard info
grpclog.SetLoggerV2(grpclog.NewLoggerV2(ioutil.Discard, os.Stderr, os.Stderr))
}
if cfg.LogPkgLevels != "" {
repoLog := capnslog.MustRepoLogger("github.com/coreos/etcd")
settings, err := repoLog.ParseLogLevelConfig(cfg.LogPkgLevels)
if err != nil {
plog.Warningf("couldn't parse log level string: %s, continuing with default levels", err.Error())
return
// for testing
var grpcLogOnce = new(sync.Once)
// setupLogging initializes etcd logging.
// Must be called after flag parsing or finishing configuring embed.Config.
func (cfg *Config) setupLogging() error {
switch cfg.Logger {
case "capnslog": // TODO: deprecate this in v3.5
cfg.ClientTLSInfo.HandshakeFailure = logTLSHandshakeFailure
cfg.PeerTLSInfo.HandshakeFailure = logTLSHandshakeFailure
if cfg.Debug {
capnslog.SetGlobalLogLevel(capnslog.DEBUG)
grpc.EnableTracing = true
// enable info, warning, error
grpclog.SetLoggerV2(grpclog.NewLoggerV2(os.Stderr, os.Stderr, os.Stderr))
} else {
capnslog.SetGlobalLogLevel(capnslog.INFO)
// only discard info
grpclog.SetLoggerV2(grpclog.NewLoggerV2(ioutil.Discard, os.Stderr, os.Stderr))
}
repoLog.SetLogLevel(settings)
// TODO: deprecate with "capnslog"
if cfg.LogPkgLevels != "" {
repoLog := capnslog.MustRepoLogger("github.com/coreos/etcd")
settings, err := repoLog.ParseLogLevelConfig(cfg.LogPkgLevels)
if err != nil {
plog.Warningf("couldn't parse log level string: %s, continuing with default levels", err.Error())
return nil
}
repoLog.SetLogLevel(settings)
}
// capnslog initially SetFormatter(NewDefaultFormatter(os.Stderr))
// where NewDefaultFormatter returns NewJournaldFormatter when syscall.Getppid() == 1
// specify 'stdout' or 'stderr' to skip journald logging even when running under systemd
switch cfg.LogOutput {
case "stdout":
capnslog.SetFormatter(capnslog.NewPrettyFormatter(os.Stdout, cfg.Debug))
case "stderr":
capnslog.SetFormatter(capnslog.NewPrettyFormatter(os.Stderr, cfg.Debug))
case DefaultLogOutput:
default:
plog.Panicf(`unknown log-output %q (only supports %q, "stdout", "stderr")`, cfg.LogOutput, DefaultLogOutput)
}
case "zap":
// TODO: make this more configurable
lcfg := zap.Config{
Level: zap.NewAtomicLevelAt(zap.InfoLevel),
Development: false,
Sampling: &zap.SamplingConfig{
Initial: 100,
Thereafter: 100,
},
Encoding: "json",
EncoderConfig: zap.NewProductionEncoderConfig(),
}
ignoreLog := false
switch cfg.LogOutput {
case DefaultLogOutput:
if syscall.Getppid() == 1 {
// capnslog initially SetFormatter(NewDefaultFormatter(os.Stderr))
// where "NewDefaultFormatter" returns "NewJournaldFormatter"
// when syscall.Getppid() == 1, specify 'stdout' or 'stderr' to
// skip journald logging even when running under systemd
fmt.Println("running under init, which may be systemd!")
// TODO: capnlog.NewJournaldFormatter()
lcfg.OutputPaths = []string{"stderr"}
lcfg.ErrorOutputPaths = []string{"stderr"}
} else {
lcfg.OutputPaths = []string{"stderr"}
lcfg.ErrorOutputPaths = []string{"stderr"}
}
case "stderr":
lcfg.OutputPaths = []string{"stderr"}
lcfg.ErrorOutputPaths = []string{"stderr"}
case "stdout":
lcfg.OutputPaths = []string{"stdout"}
lcfg.ErrorOutputPaths = []string{"stdout"}
case "discard": // only for testing
lcfg.OutputPaths = []string{}
lcfg.ErrorOutputPaths = []string{}
ignoreLog = true
default:
lcfg.OutputPaths = []string{cfg.LogOutput}
lcfg.ErrorOutputPaths = []string{cfg.LogOutput}
}
if cfg.Debug {
lcfg.Level = zap.NewAtomicLevelAt(zap.DebugLevel)
grpc.EnableTracing = true
}
var err error
if !ignoreLog {
cfg.logger, err = lcfg.Build()
} else {
cfg.logger = zap.NewNop()
}
if err != nil {
return err
}
cfg.loggerConfig = lcfg
grpcLogOnce.Do(func() {
// debug true, enable info, warning, error
// debug false, only discard info
var gl grpclog.LoggerV2
gl, err = logutil.NewGRPCLoggerV2(lcfg)
if err == nil {
grpclog.SetLoggerV2(gl)
}
})
if err != nil {
return err
}
logTLSHandshakeFailure := func(conn *tls.Conn, err error) {
state := conn.ConnectionState()
remoteAddr := conn.RemoteAddr().String()
serverName := state.ServerName
if len(state.PeerCertificates) > 0 {
cert := state.PeerCertificates[0]
ips := make([]string, len(cert.IPAddresses)) // sized by length; the loop below assigns by index
for i := range cert.IPAddresses {
ips[i] = cert.IPAddresses[i].String()
}
cfg.logger.Warn(
"rejected connection",
zap.String("remote-addr", remoteAddr),
zap.String("server-name", serverName),
zap.Strings("ip-addresses", ips),
zap.Strings("dns-names", cert.DNSNames),
zap.Error(err),
)
} else {
cfg.logger.Warn(
"rejected connection",
zap.String("remote-addr", remoteAddr),
zap.String("server-name", serverName),
zap.Error(err),
)
}
}
cfg.ClientTLSInfo.HandshakeFailure = logTLSHandshakeFailure
cfg.PeerTLSInfo.HandshakeFailure = logTLSHandshakeFailure
default:
return fmt.Errorf("unknown logger option %q", cfg.Logger)
}
// capnslog initially SetFormatter(NewDefaultFormatter(os.Stderr))
// where NewDefaultFormatter returns NewJournaldFormatter when syscall.Getppid() == 1
// specify 'stdout' or 'stderr' to skip journald logging even when running under systemd
switch cfg.LogOutput {
case "stdout":
capnslog.SetFormatter(capnslog.NewPrettyFormatter(os.Stdout, cfg.Debug))
case "stderr":
capnslog.SetFormatter(capnslog.NewPrettyFormatter(os.Stderr, cfg.Debug))
case DefaultLogOutput:
default:
plog.Panicf(`unknown log-output %q (only supports %q, "stdout", "stderr")`, cfg.LogOutput, DefaultLogOutput)
}
return nil
}
func ConfigFromFile(path string) (*Config, error) {
@ -382,7 +530,8 @@ func (cfg *configYAML) configFromFile(path string) error {
if cfg.LPUrlsJSON != "" {
u, err := types.NewURLs(strings.Split(cfg.LPUrlsJSON, ","))
if err != nil {
plog.Fatalf("unexpected error setting up listen-peer-urls: %v", err)
fmt.Fprintf(os.Stderr, "unexpected error setting up listen-peer-urls: %v\n", err)
os.Exit(1)
}
cfg.LPUrls = []url.URL(u)
}
@ -390,7 +539,8 @@ func (cfg *configYAML) configFromFile(path string) error {
if cfg.LCUrlsJSON != "" {
u, err := types.NewURLs(strings.Split(cfg.LCUrlsJSON, ","))
if err != nil {
plog.Fatalf("unexpected error setting up listen-client-urls: %v", err)
fmt.Fprintf(os.Stderr, "unexpected error setting up listen-client-urls: %v\n", err)
os.Exit(1)
}
cfg.LCUrls = []url.URL(u)
}
@ -398,7 +548,8 @@ func (cfg *configYAML) configFromFile(path string) error {
if cfg.APUrlsJSON != "" {
u, err := types.NewURLs(strings.Split(cfg.APUrlsJSON, ","))
if err != nil {
plog.Fatalf("unexpected error setting up initial-advertise-peer-urls: %v", err)
fmt.Fprintf(os.Stderr, "unexpected error setting up initial-advertise-peer-urls: %v\n", err)
os.Exit(1)
}
cfg.APUrls = []url.URL(u)
}
@ -406,7 +557,8 @@ func (cfg *configYAML) configFromFile(path string) error {
if cfg.ACUrlsJSON != "" {
u, err := types.NewURLs(strings.Split(cfg.ACUrlsJSON, ","))
if err != nil {
plog.Fatalf("unexpected error setting up advertise-peer-urls: %v", err)
fmt.Fprintf(os.Stderr, "unexpected error setting up advertise-peer-urls: %v\n", err)
os.Exit(1)
}
cfg.ACUrls = []url.URL(u)
}
@ -414,7 +566,8 @@ func (cfg *configYAML) configFromFile(path string) error {
if cfg.ListenMetricsUrlsJSON != "" {
u, err := types.NewURLs(strings.Split(cfg.ListenMetricsUrlsJSON, ","))
if err != nil {
plog.Fatalf("unexpected error setting up listen-metrics-urls: %v", err)
fmt.Fprintf(os.Stderr, "unexpected error setting up listen-metrics-urls: %v\n", err)
os.Exit(1)
}
cfg.ListenMetricsUrls = []url.URL(u)
}
@ -453,6 +606,9 @@ func (cfg *configYAML) configFromFile(path string) error {
// Validate ensures that '*embed.Config' fields are properly configured.
func (cfg *Config) Validate() error {
if err := cfg.setupLogging(); err != nil {
return err
}
if err := checkBindURLs(cfg.LPUrls); err != nil {
return err
}
@ -532,13 +688,21 @@ func (cfg *Config) PeerURLsMapAndToken(which string) (urlsmap types.URLsMap, tok
token = cfg.Durl
case cfg.DNSCluster != "":
clusterStrs, cerr := cfg.GetDNSClusterNames()
lg := cfg.logger
if cerr != nil {
plog.Errorf("couldn't resolve during SRV discovery (%v)", cerr)
if lg != nil {
lg.Error("failed to resolve during SRV discovery", zap.Error(cerr))
} else {
plog.Errorf("couldn't resolve during SRV discovery (%v)", cerr)
}
return nil, "", cerr
}
for _, s := range clusterStrs {
plog.Noticef("got bootstrap from DNS for etcd-server at %s", s)
if lg != nil {
lg.Info("got bootstrap from DNS for etcd-server", zap.String("node", s))
} else {
plog.Noticef("got bootstrap from DNS for etcd-server at %s", s)
}
}
clusterStr := strings.Join(clusterStrs, ",")
if strings.Contains(clusterStr, "https://") && cfg.PeerTLSInfo.TrustedCAFile == "" {
@ -612,10 +776,14 @@ func (cfg *Config) ClientSelfCert() (err error) {
for i, u := range cfg.LCUrls {
chosts[i] = u.Host
}
cfg.ClientTLSInfo, err = transport.SelfCert(cfg.Logger, filepath.Join(cfg.Dir, "fixtures", "client"), chosts)
cfg.ClientTLSInfo, err = transport.SelfCert(cfg.logger, filepath.Join(cfg.Dir, "fixtures", "client"), chosts)
return err
} else if cfg.ClientAutoTLS {
plog.Warningf("ignoring client auto TLS since certs given")
if cfg.logger != nil {
cfg.logger.Warn("ignoring client auto TLS since certs given")
} else {
plog.Warningf("ignoring client auto TLS since certs given")
}
}
return nil
}
@ -626,10 +794,14 @@ func (cfg *Config) PeerSelfCert() (err error) {
for i, u := range cfg.LPUrls {
phosts[i] = u.Host
}
cfg.PeerTLSInfo, err = transport.SelfCert(cfg.Logger, filepath.Join(cfg.Dir, "fixtures", "peer"), phosts)
cfg.PeerTLSInfo, err = transport.SelfCert(cfg.logger, filepath.Join(cfg.Dir, "fixtures", "peer"), phosts)
return err
} else if cfg.PeerAutoTLS {
plog.Warningf("ignoring peer auto TLS since certs given")
if cfg.logger != nil {
cfg.logger.Warn("ignoring peer auto TLS since certs given")
} else {
plog.Warningf("ignoring peer auto TLS since certs given")
}
}
return nil
}

View File

@ -33,10 +33,16 @@ func TestConfigFileOtherFields(t *testing.T) {
ClientSecurityCfgFile securityConfig `json:"client-transport-security"`
PeerSecurityCfgFile securityConfig `json:"peer-transport-security"`
ForceNewCluster bool `json:"force-new-cluster"`
Logger string `json:"logger"`
LogOutput string `json:"log-output"`
Debug bool `json:"debug"`
}{
ctls,
ptls,
true,
"zap",
"discard",
false,
}
b, err := yaml.Marshal(&yc)
@ -150,6 +156,9 @@ func mustCreateCfgFile(t *testing.T, b []byte) *os.File {
func TestAutoCompactionModeInvalid(t *testing.T) {
cfg := NewConfig()
cfg.Logger = "zap"
cfg.LogOutput = "discard"
cfg.Debug = false
cfg.AutoCompactionMode = "period"
err := cfg.Validate()
if err == nil {

View File

@ -43,6 +43,7 @@ import (
"github.com/coreos/pkg/capnslog"
"github.com/grpc-ecosystem/go-grpc-prometheus"
"github.com/soheilhy/cmux"
"go.uber.org/zap"
"google.golang.org/grpc"
"google.golang.org/grpc/keepalive"
)
@ -124,7 +125,6 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) {
urlsmap types.URLsMap
token string
)
memberInitialized := true
if !isMemberInitialized(cfg) {
memberInitialized = false
@ -173,10 +173,11 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) {
InitialCorruptCheck: cfg.ExperimentalInitialCorruptCheck,
CorruptCheckTime: cfg.ExperimentalCorruptCheckTime,
PreVote: cfg.PreVote,
Logger: cfg.logger,
LoggerConfig: cfg.loggerConfig,
Debug: cfg.Debug,
ForceNewCluster: cfg.ForceNewCluster,
}
if e.Server, err = etcdserver.NewServer(srvcfg); err != nil {
return e, err
}
@ -187,7 +188,15 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) {
ss = append(ss, v)
}
sort.Strings(ss)
plog.Infof("%s starting with cors %q", e.Server.ID(), ss)
if e.cfg.logger != nil {
e.cfg.logger.Info(
"starting with CORS",
zap.String("server-id", e.Server.ID().String()),
zap.Strings("cors", ss),
)
} else {
plog.Infof("%s starting with cors %q", e.Server.ID(), ss)
}
}
if len(e.cfg.HostWhitelist) > 0 {
ss := make([]string, 0, len(e.cfg.HostWhitelist))
@ -195,7 +204,15 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) {
ss = append(ss, v)
}
sort.Strings(ss)
plog.Infof("%s starting with host whitelist %q", e.Server.ID(), ss)
if e.cfg.logger != nil {
e.cfg.logger.Info(
"starting with host whitelist",
zap.String("server-id", e.Server.ID().String()),
zap.Strings("hosts", ss),
)
} else {
plog.Infof("%s starting with host whitelist %q", e.Server.ID(), ss)
}
}
// buffer channel so goroutines on closed connections won't wait forever
@ -321,10 +338,18 @@ func (e *Etcd) Err() <-chan error { return e.errc }
func startPeerListeners(cfg *Config) (peers []*peerListener, err error) {
if err = cfg.PeerSelfCert(); err != nil {
plog.Fatalf("could not get certs (%v)", err)
if cfg.logger != nil {
cfg.logger.Fatal("failed to get peer self-signed certs", zap.Error(err))
} else {
plog.Fatalf("could not get certs (%v)", err)
}
}
if !cfg.PeerTLSInfo.Empty() {
plog.Infof("peerTLS: %s", cfg.PeerTLSInfo)
if cfg.logger != nil {
cfg.logger.Info("starting with peer TLS", zap.String("tls-info", fmt.Sprintf("%+v", cfg.PeerTLSInfo)))
} else {
plog.Infof("peerTLS: %s", cfg.PeerTLSInfo)
}
}
peers = make([]*peerListener, len(cfg.LPUrls))
@ -334,7 +359,11 @@ func startPeerListeners(cfg *Config) (peers []*peerListener, err error) {
}
for i := range peers {
if peers[i] != nil && peers[i].close != nil {
plog.Info("stopping listening for peers on ", cfg.LPUrls[i].String())
if cfg.logger != nil {
cfg.logger.Info("stopping listening for peers", zap.String("address", cfg.LPUrls[i].String()))
} else {
plog.Info("stopping listening for peers on ", cfg.LPUrls[i].String())
}
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
peers[i].close(ctx)
cancel()
@ -345,10 +374,18 @@ func startPeerListeners(cfg *Config) (peers []*peerListener, err error) {
for i, u := range cfg.LPUrls {
if u.Scheme == "http" {
if !cfg.PeerTLSInfo.Empty() {
plog.Warningf("The scheme of peer url %s is HTTP while peer key/cert files are presented. Ignored peer key/cert files.", u.String())
if cfg.logger != nil {
cfg.logger.Warn("scheme is HTTP while key and cert files are present; ignoring key and cert files", zap.String("peer-url", u.String()))
} else {
plog.Warningf("The scheme of peer url %s is HTTP while peer key/cert files are presented. Ignored peer key/cert files.", u.String())
}
}
if cfg.PeerTLSInfo.ClientCertAuth {
plog.Warningf("The scheme of peer url %s is HTTP while client cert auth (--peer-client-cert-auth) is enabled. Ignored client cert auth for this url.", u.String())
if cfg.logger != nil {
cfg.logger.Warn("scheme is HTTP while --peer-client-cert-auth is enabled; ignoring client cert auth for this URL", zap.String("peer-url", u.String()))
} else {
plog.Warningf("The scheme of peer url %s is HTTP while client cert auth (--peer-client-cert-auth) is enabled. Ignored client cert auth for this url.", u.String())
}
}
}
peers[i] = &peerListener{close: func(context.Context) error { return nil }}
@ -360,7 +397,11 @@ func startPeerListeners(cfg *Config) (peers []*peerListener, err error) {
peers[i].close = func(context.Context) error {
return peers[i].Listener.Close()
}
plog.Info("listening for peers on ", u.String())
if cfg.logger != nil {
cfg.logger.Info("listening for peers", zap.String("address", u.String()))
} else {
plog.Info("listening for peers on ", u.String())
}
}
return peers, nil
}
@ -406,22 +447,38 @@ func (e *Etcd) servePeers() (err error) {
func startClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err error) {
if err = cfg.ClientSelfCert(); err != nil {
plog.Fatalf("could not get certs (%v)", err)
if cfg.logger != nil {
cfg.logger.Fatal("failed to get client self-signed certs", zap.Error(err))
} else {
plog.Fatalf("could not get certs (%v)", err)
}
}
if cfg.EnablePprof {
plog.Infof("pprof is enabled under %s", debugutil.HTTPPrefixPProf)
if cfg.logger != nil {
cfg.logger.Info("pprof is enabled", zap.String("path", debugutil.HTTPPrefixPProf))
} else {
plog.Infof("pprof is enabled under %s", debugutil.HTTPPrefixPProf)
}
}
sctxs = make(map[string]*serveCtx)
for _, u := range cfg.LCUrls {
sctx := newServeCtx()
sctx := newServeCtx(cfg.logger)
if u.Scheme == "http" || u.Scheme == "unix" {
if !cfg.ClientTLSInfo.Empty() {
plog.Warningf("The scheme of client url %s is HTTP while peer key/cert files are presented. Ignored key/cert files.", u.String())
if cfg.logger != nil {
cfg.logger.Warn("scheme is HTTP while key and cert files are present; ignoring key and cert files", zap.String("client-url", u.String()))
} else {
plog.Warningf("The scheme of client url %s is HTTP while peer key/cert files are presented. Ignored key/cert files.", u.String())
}
}
if cfg.ClientTLSInfo.ClientCertAuth {
plog.Warningf("The scheme of client url %s is HTTP while client cert auth (--client-cert-auth) is enabled. Ignored client cert auth for this url.", u.String())
if cfg.logger != nil {
cfg.logger.Warn("scheme is HTTP while --client-cert-auth is enabled; ignoring client cert auth for this URL", zap.String("client-url", u.String()))
} else {
plog.Warningf("The scheme of client url %s is HTTP while client cert auth (--client-cert-auth) is enabled. Ignored client cert auth for this url.", u.String())
}
}
}
if (u.Scheme == "https" || u.Scheme == "unixs") && cfg.ClientTLSInfo.Empty() {
@ -452,7 +509,15 @@ func startClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err error) {
if fdLimit, fderr := runtimeutil.FDLimit(); fderr == nil {
if fdLimit <= reservedInternalFDNum {
plog.Fatalf("file descriptor limit[%d] of etcd process is too low, and should be set higher than %d to ensure internal usage", fdLimit, reservedInternalFDNum)
if cfg.logger != nil {
cfg.logger.Fatal(
"file descriptor limit of etcd process is too low; please set higher",
zap.Uint64("limit", fdLimit),
zap.Int("recommended-limit", reservedInternalFDNum),
)
} else {
plog.Fatalf("file descriptor limit[%d] of etcd process is too low, and should be set higher than %d to ensure internal usage", fdLimit, reservedInternalFDNum)
}
}
sctx.l = transport.LimitListener(sctx.l, int(fdLimit-reservedInternalFDNum))
}
@ -463,11 +528,19 @@ func startClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err error) {
}
}
plog.Info("listening for client requests on ", u.Host)
if cfg.logger != nil {
cfg.logger.Info("listening for client requests", zap.String("host", u.Host))
} else {
plog.Info("listening for client requests on ", u.Host)
}
defer func() {
if err != nil {
sctx.l.Close()
plog.Info("stopping listening for client requests on ", u.Host)
if cfg.logger != nil {
cfg.logger.Info("stopping listening for client requests", zap.String("host", u.Host))
} else {
plog.Info("stopping listening for client requests on ", u.Host)
}
}
}()
for k := range cfg.UserHandlers {
@ -487,14 +560,18 @@ func startClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err error) {
func (e *Etcd) serveClients() (err error) {
if !e.cfg.ClientTLSInfo.Empty() {
plog.Infof("ClientTLS: %s", e.cfg.ClientTLSInfo)
if e.cfg.logger != nil {
e.cfg.logger.Info("starting with client TLS", zap.String("tls-info", fmt.Sprintf("%+v", e.cfg.ClientTLSInfo)))
} else {
plog.Infof("ClientTLS: %s", e.cfg.ClientTLSInfo)
}
}
// Start a client server goroutine for each listen address
var h http.Handler
if e.Config().EnableV2 {
if len(e.Config().ExperimentalEnableV2V3) > 0 {
srv := v2v3.NewServer(v3client.New(e.Server), e.cfg.ExperimentalEnableV2V3)
srv := v2v3.NewServer(e.cfg.logger, v3client.New(e.Server), e.cfg.ExperimentalEnableV2V3)
h = v2http.NewClientHandler(srv, e.Server.Cfg.ReqTimeout())
} else {
h = v2http.NewClientHandler(e.Server, e.Server.Cfg.ReqTimeout())
@ -549,7 +626,11 @@ func (e *Etcd) serveMetrics() (err error) {
}
e.metricsListeners = append(e.metricsListeners, ml)
go func(u url.URL, ln net.Listener) {
plog.Info("listening for metrics on ", u.String())
if e.cfg.logger != nil {
e.cfg.logger.Info("listening for metrics", zap.String("url", u.String()))
} else {
plog.Info("listening for metrics on ", u.String())
}
e.errHandler(http.Serve(ln, metricsMux))
}(murl, ml)
}
@ -569,6 +650,14 @@ func (e *Etcd) errHandler(err error) {
}
}
// GetLogger returns the logger.
func (e *Etcd) GetLogger() *zap.Logger {
e.cfg.loggerMu.RLock()
l := e.cfg.logger
e.cfg.loggerMu.RUnlock()
return l
}
func parseCompactionRetention(mode, retention string) (ret time.Duration, err error) {
h, err := strconv.Atoi(retention)
if err == nil {

View File

@ -40,12 +40,14 @@ import (
gw "github.com/grpc-ecosystem/grpc-gateway/runtime"
"github.com/soheilhy/cmux"
"github.com/tmc/grpc-websocket-proxy/wsproxy"
"go.uber.org/zap"
"golang.org/x/net/trace"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
)
type serveCtx struct {
lg *zap.Logger
l net.Listener
addr string
secure bool
@ -65,10 +67,14 @@ type servers struct {
http *http.Server
}
func newServeCtx() *serveCtx {
func newServeCtx(lg *zap.Logger) *serveCtx {
ctx, cancel := context.WithCancel(context.Background())
return &serveCtx{ctx: ctx, cancel: cancel, userHandlers: make(map[string]http.Handler),
serversC: make(chan *servers, 2), // in case sctx.insecure,sctx.secure true
return &serveCtx{
lg: lg,
ctx: ctx,
cancel: cancel,
userHandlers: make(map[string]http.Handler),
serversC: make(chan *servers, 2), // in case sctx.insecure,sctx.secure true
}
}
@ -83,7 +89,12 @@ func (sctx *serveCtx) serve(
gopts ...grpc.ServerOption) (err error) {
logger := defaultLog.New(ioutil.Discard, "etcdhttp", 0)
<-s.ReadyNotify()
plog.Info("ready to serve client requests")
if sctx.lg != nil {
sctx.lg.Info("ready to server client requests")
} else {
plog.Info("ready to serve client requests")
}
m := cmux.New(sctx.l)
v3c := v3client.New(s)
@ -116,14 +127,21 @@ func (sctx *serveCtx) serve(
httpmux := sctx.createMux(gwmux, handler)
srvhttp := &http.Server{
Handler: createAccessController(s, httpmux),
Handler: createAccessController(sctx.lg, s, httpmux),
ErrorLog: logger, // do not log user error
}
httpl := m.Match(cmux.HTTP1())
go func() { errHandler(srvhttp.Serve(httpl)) }()
sctx.serversC <- &servers{grpc: gs, http: srvhttp}
plog.Noticef("serving insecure client requests on %s, this is strongly discouraged!", sctx.l.Addr().String())
if sctx.lg != nil {
sctx.lg.Info(
"serving insecure client requests; this is strongly discouraged!",
zap.String("address", sctx.l.Addr().String()),
)
} else {
plog.Noticef("serving insecure client requests on %s, this is strongly discouraged!", sctx.l.Addr().String())
}
}
if sctx.secure {
@ -159,14 +177,21 @@ func (sctx *serveCtx) serve(
httpmux := sctx.createMux(gwmux, handler)
srv := &http.Server{
Handler: createAccessController(s, httpmux),
Handler: createAccessController(sctx.lg, s, httpmux),
TLSConfig: tlscfg,
ErrorLog: logger, // do not log user error
}
go func() { errHandler(srv.Serve(tlsl)) }()
sctx.serversC <- &servers{secure: true, grpc: gs, http: srv}
plog.Infof("serving client requests on %s", sctx.l.Addr().String())
if sctx.lg != nil {
sctx.lg.Info(
"serving client requests",
zap.String("address", sctx.l.Addr().String()),
)
} else {
plog.Infof("serving client requests on %s", sctx.l.Addr().String())
}
}
close(sctx.serversC)
@ -218,7 +243,15 @@ func (sctx *serveCtx) registerGateway(opts []grpc.DialOption) (*gw.ServeMux, err
go func() {
<-ctx.Done()
if cerr := conn.Close(); cerr != nil {
plog.Warningf("failed to close conn to %s: %v", sctx.l.Addr().String(), cerr)
if sctx.lg != nil {
sctx.lg.Warn(
"failed to close connection",
zap.String("address", sctx.l.Addr().String()),
zap.Error(cerr),
)
} else {
plog.Warningf("failed to close conn to %s: %v", sctx.l.Addr().String(), cerr)
}
}
}()
@ -254,11 +287,12 @@ func (sctx *serveCtx) createMux(gwmux *gw.ServeMux, handler http.Handler) *http.
// - mutate gRPC gateway request paths
// - check hostname whitelist
// client HTTP requests goes here first
func createAccessController(s *etcdserver.EtcdServer, mux *http.ServeMux) http.Handler {
return &accessController{s: s, mux: mux}
func createAccessController(lg *zap.Logger, s *etcdserver.EtcdServer, mux *http.ServeMux) http.Handler {
return &accessController{lg: lg, s: s, mux: mux}
}
type accessController struct {
lg *zap.Logger
s *etcdserver.EtcdServer
mux *http.ServeMux
}
@ -272,7 +306,14 @@ func (ac *accessController) ServeHTTP(rw http.ResponseWriter, req *http.Request)
if req.TLS == nil { // check origin if client connection is not secure
host := httputil.GetHostname(req)
if !ac.s.AccessController.IsHostWhitelisted(host) {
plog.Warningf("rejecting HTTP request from %q to prevent DNS rebinding attacks", host)
if ac.lg != nil {
ac.lg.Warn(
"rejecting HTTP request to prevent DNS rebinding attacks",
zap.String("host", host),
)
} else {
plog.Warningf("rejecting HTTP request from %q to prevent DNS rebinding attacks", host)
}
// TODO: use Go's "http.StatusMisdirectedRequest" (421)
// https://github.com/golang/go/commit/4b8a7eafef039af1834ef9bfa879257c4a72b7b5
http.Error(rw, errCVE20185702(host), 421)
@ -347,7 +388,11 @@ func (ch *corsHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
func (sctx *serveCtx) registerUserHandler(s string, h http.Handler) {
if sctx.userHandlers[s] != nil {
plog.Warningf("path %s already registered by user handler", s)
if sctx.lg != nil {
sctx.lg.Warn("path is already registered by user handler", zap.String("path", s))
} else {
plog.Warningf("path %s already registered by user handler", s)
}
return
}
sctx.userHandlers[s] = h

View File

@ -25,6 +25,5 @@ func isMemberInitialized(cfg *Config) bool {
if waldir == "" {
waldir = filepath.Join(cfg.Dir, "member", "wal")
}
return wal.Exist(waldir)
}

View File

@ -35,6 +35,7 @@ import (
bolt "github.com/coreos/bbolt"
"github.com/urfave/cli"
"go.uber.org/zap"
)
func NewBackupCommand() cli.Command {
@ -86,7 +87,7 @@ func handleBackup(c *cli.Context) error {
metadata.NodeID = idgen.Next()
metadata.ClusterID = idgen.Next()
neww, err := wal.Create(destWAL, pbutil.MustMarshal(&metadata))
neww, err := wal.Create(zap.NewExample(), destWAL, pbutil.MustMarshal(&metadata))
if err != nil {
log.Fatal(err)
}
@ -102,14 +103,14 @@ func handleBackup(c *cli.Context) error {
}
func saveSnap(destSnap, srcSnap string) (walsnap walpb.Snapshot) {
ss := raftsnap.New(srcSnap)
ss := raftsnap.New(zap.NewExample(), srcSnap)
snapshot, err := ss.Load()
if err != nil && err != raftsnap.ErrNoSnapshot {
log.Fatal(err)
}
if snapshot != nil {
walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
newss := raftsnap.New(destSnap)
newss := raftsnap.New(zap.NewExample(), destSnap)
if err = newss.SaveSnap(*snapshot); err != nil {
log.Fatal(err)
}
@ -118,7 +119,7 @@ func saveSnap(destSnap, srcSnap string) (walsnap walpb.Snapshot) {
}
func loadWAL(srcWAL string, walsnap walpb.Snapshot, v3 bool) (etcdserverpb.Metadata, raftpb.HardState, []raftpb.Entry) {
w, err := wal.OpenForRead(srcWAL, walsnap)
w, err := wal.OpenForRead(zap.NewExample(), srcWAL, walsnap)
if err != nil {
log.Fatal(err)
}

View File

@ -43,6 +43,7 @@ import (
"github.com/gogo/protobuf/proto"
"github.com/spf13/cobra"
"go.uber.org/zap"
)
var (
@ -127,7 +128,7 @@ func prepareBackend() backend.Backend {
func rebuildStoreV2() (v2store.Store, uint64) {
var index uint64
cl := membership.NewCluster("")
cl := membership.NewCluster(zap.NewExample(), "")
waldir := migrateWALdir
if len(waldir) == 0 {
@ -135,7 +136,7 @@ func rebuildStoreV2() (v2store.Store, uint64) {
}
snapdir := filepath.Join(migrateDatadir, "member", "snap")
ss := raftsnap.New(snapdir)
ss := raftsnap.New(zap.NewExample(), snapdir)
snapshot, err := ss.Load()
if err != nil && err != raftsnap.ErrNoSnapshot {
ExitWithError(ExitError, err)
@ -147,7 +148,7 @@ func rebuildStoreV2() (v2store.Store, uint64) {
index = snapshot.Metadata.Index
}
w, err := wal.OpenForRead(waldir, walsnap)
w, err := wal.OpenForRead(zap.NewExample(), waldir, walsnap)
if err != nil {
ExitWithError(ExitError, err)
}
@ -169,7 +170,7 @@ func rebuildStoreV2() (v2store.Store, uint64) {
cl.SetStore(st)
cl.Recover(api.UpdateCapability)
applier := etcdserver.NewApplierV2(st, cl)
applier := etcdserver.NewApplierV2(zap.NewExample(), st, cl)
for _, ent := range ents {
if ent.Type == raftpb.EntryConfChange {
var cc raftpb.ConfChange

View File

@ -95,14 +95,10 @@ func snapshotSaveCommandFunc(cmd *cobra.Command, args []string) {
ExitWithError(ExitBadArgs, err)
}
debug, err := cmd.Flags().GetBool("debug")
lg, err := zap.NewProduction()
if err != nil {
ExitWithError(ExitError, err)
}
lg := zap.NewNop()
if debug {
lg = zap.NewExample()
}
sp := snapshot.NewV3(lg)
cfg := mustClientCfgFromCmd(cmd)
@ -120,14 +116,10 @@ func snapshotStatusCommandFunc(cmd *cobra.Command, args []string) {
}
initDisplayFromCmd(cmd)
debug, err := cmd.Flags().GetBool("debug")
lg, err := zap.NewProduction()
if err != nil {
ExitWithError(ExitError, err)
}
lg := zap.NewNop()
if debug {
lg = zap.NewExample()
}
sp := snapshot.NewV3(lg)
ds, err := sp.Status(args[0])
if err != nil {
@ -152,14 +144,10 @@ func snapshotRestoreCommandFunc(cmd *cobra.Command, args []string) {
walDir = filepath.Join(dataDir, "member", "wal")
}
debug, err := cmd.Flags().GetBool("debug")
lg, err := zap.NewProduction()
if err != nil {
ExitWithError(ExitError, err)
}
lg := zap.NewNop()
if debug {
lg = zap.NewExample()
}
sp := snapshot.NewV3(lg)
if err := sp.Restore(snapshot.RestoreConfig{

View File

@ -20,6 +20,7 @@ import (
"flag"
"fmt"
"io/ioutil"
"log"
"net/url"
"os"
"runtime"
@ -31,6 +32,7 @@ import (
"github.com/coreos/etcd/version"
"github.com/ghodss/yaml"
"go.uber.org/zap"
)
var (
@ -213,9 +215,10 @@ func newConfig() *config {
fs.Var(flags.NewUniqueStringsValue("*"), "host-whitelist", "Comma-separated acceptable hostnames from HTTP client requests, if server is not secure (empty means allow all).")
// logging
fs.BoolVar(&cfg.ec.Debug, "debug", false, "Enable debug-level logging for etcd.")
fs.StringVar(&cfg.ec.LogPkgLevels, "log-package-levels", "", "Specify a particular log level for each etcd package (eg: 'etcdmain=CRITICAL,etcdserver=DEBUG').")
fs.StringVar(&cfg.ec.Logger, "logger", "capnslog", "Specify 'zap' for structured logging or 'capnslog'.")
fs.StringVar(&cfg.ec.LogOutput, "log-output", embed.DefaultLogOutput, "Specify 'stdout' or 'stderr' to skip journald logging even when running under systemd.")
fs.BoolVar(&cfg.ec.Debug, "debug", false, "Enable debug-level logging for etcd.")
fs.StringVar(&cfg.ec.LogPkgLevels, "log-package-levels", "", "(To be deprecated) Specify a particular log level for each etcd package (eg: 'etcdmain=CRITICAL,etcdserver=DEBUG').")
// version
fs.BoolVar(&cfg.printVersion, "version", false, "Print the version and exit.")
@ -271,18 +274,26 @@ func (cfg *config) parse(arguments []string) error {
var err error
if cfg.configFile != "" {
plog.Infof("Loading server configuration from %q. Other configuration command line flags and environment variables will be ignored if provided.", cfg.configFile)
err = cfg.configFromFile(cfg.configFile)
if lg := cfg.ec.GetLogger(); lg != nil {
lg.Info(
"loaded server configuraionl, other configuration command line flags and environment variables will be ignored if provided",
zap.String("path", cfg.configFile),
)
} else {
plog.Infof("Loading server configuration from %q. Other configuration command line flags and environment variables will be ignored if provided.", cfg.configFile)
}
} else {
err = cfg.configFromCmdLine()
}
// now the logger is set up
return err
}
func (cfg *config) configFromCmdLine() error {
err := flags.SetFlagsFromEnv("ETCD", cfg.cf.flagSet)
if err != nil {
plog.Fatalf("%v", err)
return err
}
cfg.ec.LPUrls = flags.UniqueURLsFromFlag(cfg.cf.flagSet, "listen-peer-urls")
@ -331,21 +342,21 @@ func (cfg *config) configFromFile(path string) error {
if cfg.ec.ListenMetricsUrlsJSON != "" {
us, err := types.NewURLs(strings.Split(cfg.ec.ListenMetricsUrlsJSON, ","))
if err != nil {
plog.Panicf("unexpected error setting up listen-metrics-urls: %v", err)
log.Fatalf("unexpected error setting up listen-metrics-urls: %v", err)
}
cfg.ec.ListenMetricsUrls = []url.URL(us)
}
if cfg.cp.FallbackJSON != "" {
if err := cfg.cf.fallback.Set(cfg.cp.FallbackJSON); err != nil {
plog.Panicf("unexpected error setting up discovery-fallback flag: %v", err)
log.Fatalf("unexpected error setting up discovery-fallback flag: %v", err)
}
cfg.cp.Fallback = cfg.cf.fallback.String()
}
if cfg.cp.ProxyJSON != "" {
if err := cfg.cf.proxy.Set(cfg.cp.ProxyJSON); err != nil {
plog.Panicf("unexpected error setting up proxyFlag: %v", err)
log.Fatalf("unexpected error setting up proxyFlag: %v", err)
}
cfg.cp.Proxy = cfg.cf.proxy.String()
}

View File

@ -39,6 +39,7 @@ import (
"github.com/coreos/etcd/version"
"github.com/coreos/pkg/capnslog"
"go.uber.org/zap"
"google.golang.org/grpc"
)
@ -60,42 +61,86 @@ func startEtcdOrProxyV2() {
err := cfg.parse(os.Args[1:])
if err != nil {
plog.Errorf("error verifying flags, %v. See 'etcd --help'.", err)
lg := cfg.ec.GetLogger()
if lg != nil {
lg.Error("failed to verify flags", zap.Error(err))
} else {
plog.Errorf("error verifying flags, %v. See 'etcd --help'.", err)
}
switch err {
case embed.ErrUnsetAdvertiseClientURLsFlag:
plog.Errorf("When listening on specific address(es), this etcd process must advertise accessible url(s) to each connected client.")
if lg != nil {
lg.Error("advertise client URLs are not set", zap.Error(err))
} else {
plog.Errorf("When listening on specific address(es), this etcd process must advertise accessible url(s) to each connected client.")
}
}
os.Exit(1)
}
cfg.ec.SetupLogging()
var stopped <-chan struct{}
var errc <-chan error
maxProcs, cpus := runtime.GOMAXPROCS(0), runtime.NumCPU()
plog.Infof("etcd Version: %s\n", version.Version)
plog.Infof("Git SHA: %s\n", version.GitSHA)
plog.Infof("Go Version: %s\n", runtime.Version())
plog.Infof("Go OS/Arch: %s/%s\n", runtime.GOOS, runtime.GOARCH)
GoMaxProcs := runtime.GOMAXPROCS(0)
plog.Infof("setting maximum number of CPUs to %d, total number of available CPUs is %d", GoMaxProcs, runtime.NumCPU())
lg := cfg.ec.GetLogger()
if lg != nil {
lg.Info(
"starting etcd",
zap.String("etcd-version", version.Version),
zap.String("git-sha", version.GitSHA),
zap.String("go-version", runtime.Version()),
zap.String("go-os", runtime.GOOS),
zap.String("go-arch", runtime.GOARCH),
zap.Int("max-cpu-set", maxProcs),
zap.Int("max-cpu-available", cpus),
)
} else {
plog.Infof("etcd Version: %s\n", version.Version)
plog.Infof("Git SHA: %s\n", version.GitSHA)
plog.Infof("Go Version: %s\n", runtime.Version())
plog.Infof("Go OS/Arch: %s/%s\n", runtime.GOOS, runtime.GOARCH)
plog.Infof("setting maximum number of CPUs to %d, total number of available CPUs is %d", maxProcs, cpus)
}
defaultHost, dhErr := (&cfg.ec).UpdateDefaultClusterFromName(defaultInitialCluster)
if defaultHost != "" {
plog.Infof("advertising using detected default host %q", defaultHost)
if lg != nil {
lg.Info(
"detected default host for advertise",
zap.String("host", defaultHost),
)
} else {
plog.Infof("advertising using detected default host %q", defaultHost)
}
}
if dhErr != nil {
plog.Noticef("failed to detect default host (%v)", dhErr)
if lg != nil {
lg.Info("failed to detect default host", zap.Error(dhErr))
} else {
plog.Noticef("failed to detect default host (%v)", dhErr)
}
}
if cfg.ec.Dir == "" {
cfg.ec.Dir = fmt.Sprintf("%v.etcd", cfg.ec.Name)
plog.Warningf("no data-dir provided, using default data-dir ./%s", cfg.ec.Dir)
if lg != nil {
lg.Warn(
"'data-dir' was empty; using default",
zap.String("data-dir", cfg.ec.Dir),
)
} else {
plog.Warningf("no data-dir provided, using default data-dir ./%s", cfg.ec.Dir)
}
}
which := identifyDataDirOrDie(cfg.ec.Dir)
var stopped <-chan struct{}
var errc <-chan error
which := identifyDataDirOrDie(cfg.ec.GetLogger(), cfg.ec.Dir)
if which != dirEmpty {
plog.Noticef("the server is already initialized as %v before, starting as etcd %v...", which, which)
if lg != nil {
lg.Info(
"server has already been initialized; starting as configured",
zap.String("data-dir", cfg.ec.Dir),
zap.String("dir-type", string(which)),
)
} else {
plog.Noticef("the server is already initialized as %v before, starting as etcd %v...", which, which)
}
switch which {
case dirMember:
stopped, errc, err = startEtcd(&cfg.ec)
@ -110,7 +155,11 @@ func startEtcdOrProxyV2() {
stopped, errc, err = startEtcd(&cfg.ec)
if derr, ok := err.(*etcdserver.DiscoveryError); ok && derr.Err == discovery.ErrFullCluster {
if cfg.shouldFallbackToProxy() {
plog.Noticef("discovery cluster full, falling back to %s", fallbackFlagProxy)
if lg != nil {
lg.Warn("discovery cluster is full; falling back to proxy", zap.String("fallback-proxy", fallbackFlagProxy))
} else {
plog.Noticef("discovery cluster full, falling back to %s", fallbackFlagProxy)
}
shouldProxy = true
}
}
@ -124,51 +173,109 @@ func startEtcdOrProxyV2() {
if derr, ok := err.(*etcdserver.DiscoveryError); ok {
switch derr.Err {
case discovery.ErrDuplicateID:
plog.Errorf("member %q has previously registered with discovery service token (%s).", cfg.ec.Name, cfg.ec.Durl)
plog.Errorf("But etcd could not find valid cluster configuration in the given data dir (%s).", cfg.ec.Dir)
plog.Infof("Please check the given data dir path if the previous bootstrap succeeded")
plog.Infof("or use a new discovery token if the previous bootstrap failed.")
if lg != nil {
lg.Error(
"member has been registered with discovery service",
zap.String("name", cfg.ec.Name),
zap.String("discovery-token", cfg.ec.Durl),
zap.Error(derr.Err),
)
lg.Error(
"but could not find valid cluster configuration",
zap.String("data-dir", cfg.ec.Dir),
)
lg.Warn("check data dir if previous bootstrap succeeded")
lg.Warn("or use a new discovery token if previous bootstrap failed")
} else {
plog.Errorf("member %q has previously registered with discovery service token (%s).", cfg.ec.Name, cfg.ec.Durl)
plog.Errorf("But etcd could not find valid cluster configuration in the given data dir (%s).", cfg.ec.Dir)
plog.Infof("Please check the given data dir path if the previous bootstrap succeeded")
plog.Infof("or use a new discovery token if the previous bootstrap failed.")
}
case discovery.ErrDuplicateName:
plog.Errorf("member with duplicated name has registered with discovery service token(%s).", cfg.ec.Durl)
plog.Errorf("please check (cURL) the discovery token for more information.")
plog.Errorf("please do not reuse the discovery token and generate a new one to bootstrap the cluster.")
if lg != nil {
lg.Error(
"member with duplicated name has already been registered",
zap.String("discovery-token", cfg.ec.Durl),
zap.Error(derr.Err),
)
lg.Warn("cURL the discovery token URL for details")
lg.Warn("do not reuse discovery token; generate a new one to bootstrap a cluster")
} else {
plog.Errorf("member with duplicated name has registered with discovery service token(%s).", cfg.ec.Durl)
plog.Errorf("please check (cURL) the discovery token for more information.")
plog.Errorf("please do not reuse the discovery token and generate a new one to bootstrap the cluster.")
}
default:
plog.Errorf("%v", err)
plog.Infof("discovery token %s was used, but failed to bootstrap the cluster.", cfg.ec.Durl)
plog.Infof("please generate a new discovery token and try to bootstrap again.")
if lg != nil {
lg.Error(
"failed to bootstrap; discovery token was already used",
zap.String("discovery-token", cfg.ec.Durl),
zap.Error(err),
)
lg.Warn("do not reuse discovery token; generate a new one to bootstrap a cluster")
} else {
plog.Errorf("%v", err)
plog.Infof("discovery token %s was used, but failed to bootstrap the cluster.", cfg.ec.Durl)
plog.Infof("please generate a new discovery token and try to bootstrap again.")
}
}
os.Exit(1)
}
if strings.Contains(err.Error(), "include") && strings.Contains(err.Error(), "--initial-cluster") {
plog.Infof("%v", err)
if lg != nil {
lg.Error("failed to start", zap.Error(err))
} else {
plog.Infof("%v", err)
}
if cfg.ec.InitialCluster == cfg.ec.InitialClusterFromName(cfg.ec.Name) {
plog.Infof("forgot to set --initial-cluster flag?")
if lg != nil {
lg.Warn("forgot to set --initial-cluster?")
} else {
plog.Infof("forgot to set --initial-cluster flag?")
}
}
if types.URLs(cfg.ec.APUrls).String() == embed.DefaultInitialAdvertisePeerURLs {
plog.Infof("forgot to set --initial-advertise-peer-urls flag?")
if lg != nil {
lg.Warn("forgot to set --initial-advertise-peer-urls?")
} else {
plog.Infof("forgot to set --initial-advertise-peer-urls flag?")
}
}
if cfg.ec.InitialCluster == cfg.ec.InitialClusterFromName(cfg.ec.Name) && len(cfg.ec.Durl) == 0 {
plog.Infof("if you want to use discovery service, please set --discovery flag.")
if lg != nil {
lg.Warn("--discovery flag is not set")
} else {
plog.Infof("if you want to use discovery service, please set --discovery flag.")
}
}
os.Exit(1)
}
plog.Fatalf("%v", err)
if lg != nil {
lg.Fatal("discovery failed", zap.Error(err))
} else {
plog.Fatalf("%v", err)
}
}
osutil.HandleInterrupts()
osutil.HandleInterrupts(lg)
// At this point, the initialization of etcd is done.
// The listeners are listening on the TCP ports and ready
// for accepting connections. The etcd instance should be
// joined with the cluster and ready to serve incoming
// connections.
notifySystemd()
notifySystemd(lg)
select {
case lerr := <-errc:
// fatal out on listener errors
plog.Fatal(lerr)
if lg != nil {
lg.Fatal("listener failed", zap.Error(err))
} else {
plog.Fatal(lerr)
}
case <-stopped:
}
@ -191,7 +298,12 @@ func startEtcd(cfg *embed.Config) (<-chan struct{}, <-chan error, error) {
// startProxy launches an HTTP proxy for client communication which proxies to other etcd nodes.
func startProxy(cfg *config) error {
plog.Notice("proxy: this proxy supports v2 API only!")
lg := cfg.ec.GetLogger()
if lg != nil {
lg.Info("v2 API proxy starting")
} else {
plog.Notice("proxy: this proxy supports v2 API only!")
}
clientTLSInfo := cfg.ec.ClientTLSInfo
if clientTLSInfo.Empty() {
@ -209,7 +321,11 @@ func startProxy(cfg *config) error {
pt.MaxIdleConnsPerHost = httpproxy.DefaultMaxIdleConnsPerHost
if err = cfg.ec.PeerSelfCert(); err != nil {
plog.Fatalf("could not get certs (%v)", err)
if lg != nil {
lg.Fatal("failed to get self-signed certs for peer", zap.Error(err))
} else {
plog.Fatalf("could not get certs (%v)", err)
}
}
tr, err := transport.NewTimeoutTransport(cfg.ec.PeerTLSInfo, time.Duration(cfg.cp.ProxyDialTimeoutMs)*time.Millisecond, time.Duration(cfg.cp.ProxyReadTimeoutMs)*time.Millisecond, time.Duration(cfg.cp.ProxyWriteTimeoutMs)*time.Millisecond)
if err != nil {
@ -229,10 +345,24 @@ func startProxy(cfg *config) error {
switch {
case err == nil:
if cfg.ec.Durl != "" {
plog.Warningf("discovery token ignored since the proxy has already been initialized. Valid cluster file found at %q", clusterfile)
if lg != nil {
lg.Warn(
"discovery token ignored since the proxy has already been initialized; valid cluster file found",
zap.String("cluster-file", clusterfile),
)
} else {
plog.Warningf("discovery token ignored since the proxy has already been initialized. Valid cluster file found at %q", clusterfile)
}
}
if cfg.ec.DNSCluster != "" {
plog.Warningf("DNS SRV discovery ignored since the proxy has already been initialized. Valid cluster file found at %q", clusterfile)
if lg != nil {
lg.Warn(
"DNS SRV discovery ignored since the proxy has already been initialized; valid cluster file found",
zap.String("cluster-file", clusterfile),
)
} else {
plog.Warningf("DNS SRV discovery ignored since the proxy has already been initialized. Valid cluster file found at %q", clusterfile)
}
}
urls := struct{ PeerURLs []string }{}
err = json.Unmarshal(b, &urls)
@ -240,7 +370,15 @@ func startProxy(cfg *config) error {
return err
}
peerURLs = urls.PeerURLs
plog.Infof("proxy: using peer urls %v from cluster file %q", peerURLs, clusterfile)
if lg != nil {
lg.Info(
"proxy using peer URLS from cluster file",
zap.Strings("peer-urls", peerURLs),
zap.String("cluster-file", clusterfile),
)
} else {
plog.Infof("proxy: using peer urls %v from cluster file %q", peerURLs, clusterfile)
}
case os.IsNotExist(err):
var urlsmap types.URLsMap
urlsmap, _, err = cfg.ec.PeerURLsMapAndToken("proxy")
@ -259,41 +397,75 @@ func startProxy(cfg *config) error {
}
}
peerURLs = urlsmap.URLs()
plog.Infof("proxy: using peer urls %v ", peerURLs)
if lg != nil {
lg.Info("proxy using peer URLS", zap.Strings("peer-urls", peerURLs))
} else {
plog.Infof("proxy: using peer urls %v ", peerURLs)
}
default:
return err
}
clientURLs := []string{}
uf := func() []string {
gcls, gerr := etcdserver.GetClusterFromRemotePeers(peerURLs, tr)
gcls, gerr := etcdserver.GetClusterFromRemotePeers(lg, peerURLs, tr)
if gerr != nil {
plog.Warningf("proxy: %v", gerr)
if lg != nil {
lg.Warn(
"failed to get cluster from remote peers",
zap.Strings("peer-urls", peerURLs),
zap.Error(gerr),
)
} else {
plog.Warningf("proxy: %v", gerr)
}
return []string{}
}
clientURLs = gcls.ClientURLs()
urls := struct{ PeerURLs []string }{gcls.PeerURLs()}
b, jerr := json.Marshal(urls)
if jerr != nil {
plog.Warningf("proxy: error on marshal peer urls %s", jerr)
if lg != nil {
lg.Warn("proxy failed to marshal peer URLs", zap.Error(jerr))
} else {
plog.Warningf("proxy: error on marshal peer urls %s", jerr)
}
return clientURLs
}
err = pkgioutil.WriteAndSyncFile(clusterfile+".bak", b, 0600)
if err != nil {
plog.Warningf("proxy: error on writing urls %s", err)
if lg != nil {
lg.Warn("proxy failed to write cluster file", zap.Error(err))
} else {
plog.Warningf("proxy: error on writing urls %s", err)
}
return clientURLs
}
err = os.Rename(clusterfile+".bak", clusterfile)
if err != nil {
plog.Warningf("proxy: error on updating clusterfile %s", err)
if lg != nil {
lg.Warn(
"proxy failed to rename cluster file",
zap.String("path", clusterfile),
zap.Error(err),
)
} else {
plog.Warningf("proxy: error on updating clusterfile %s", err)
}
return clientURLs
}
if !reflect.DeepEqual(gcls.PeerURLs(), peerURLs) {
plog.Noticef("proxy: updated peer urls in cluster file from %v to %v", peerURLs, gcls.PeerURLs())
if lg != nil {
lg.Info(
"proxy updated peer URLs",
zap.Strings("from", peerURLs),
zap.Strings("to", gcls.PeerURLs()),
)
} else {
plog.Noticef("proxy: updated peer urls in cluster file from %v to %v", peerURLs, gcls.PeerURLs())
}
}
peerURLs = gcls.PeerURLs()
@ -318,9 +490,13 @@ func startProxy(cfg *config) error {
}
listenerTLS := cfg.ec.ClientTLSInfo
if cfg.ec.ClientAutoTLS && cTLS {
listenerTLS, err = transport.SelfCert(cfg.ec.Logger, filepath.Join(cfg.ec.Dir, "clientCerts"), cHosts)
listenerTLS, err = transport.SelfCert(cfg.ec.GetLogger(), filepath.Join(cfg.ec.Dir, "clientCerts"), cHosts)
if err != nil {
plog.Fatalf("proxy: could not initialize self-signed client certs (%v)", err)
if lg != nil {
lg.Fatal("failed to initialize self-signed client cert", zap.Error(err))
} else {
plog.Fatalf("proxy: could not initialize self-signed client certs (%v)", err)
}
}
}
@ -333,7 +509,11 @@ func startProxy(cfg *config) error {
host := u.String()
go func() {
plog.Info("proxy: listening for client requests on ", host)
if lg != nil {
lg.Info("proxy started listening on client requests", zap.String("host", host))
} else {
plog.Info("proxy: listening for client requests on ", host)
}
mux := http.NewServeMux()
etcdhttp.HandlePrometheus(mux) // v2 proxy just uses the same port
mux.Handle("/", ph)
@ -345,13 +525,17 @@ func startProxy(cfg *config) error {
// identifyDataDirOrDie returns the type of the data dir.
// Dies if the datadir is invalid.
func identifyDataDirOrDie(dir string) dirType {
func identifyDataDirOrDie(lg *zap.Logger, dir string) dirType {
names, err := fileutil.ReadDir(dir)
if err != nil {
if os.IsNotExist(err) {
return dirEmpty
}
plog.Fatalf("error listing data dir: %s", dir)
if lg != nil {
lg.Fatal("failed to list data directory", zap.String("dir", dir), zap.Error(err))
} else {
plog.Fatalf("error listing data dir: %s", dir)
}
}
var m, p bool
@ -362,12 +546,24 @@ func identifyDataDirOrDie(dir string) dirType {
case dirProxy:
p = true
default:
plog.Warningf("found invalid file/dir %s under data dir %s (Ignore this if you are upgrading etcd)", name, dir)
if lg != nil {
lg.Warn(
"found invalid file under data directory",
zap.String("filename", name),
zap.String("data-dir", dir),
)
} else {
plog.Warningf("found invalid file/dir %s under data dir %s (Ignore this if you are upgrading etcd)", name, dir)
}
}
}
if m && p {
plog.Fatal("invalid datadir. Both member and proxy directories exist.")
if lg != nil {
lg.Fatal("invalid datadir; both member and proxy directories exist")
} else {
plog.Fatal("invalid datadir. Both member and proxy directories exist.")
}
}
if m {
return dirMember
@ -387,9 +583,10 @@ func checkSupportArch() {
// so unset here to not parse through flag
defer os.Unsetenv("ETCD_UNSUPPORTED_ARCH")
if env, ok := os.LookupEnv("ETCD_UNSUPPORTED_ARCH"); ok && env == runtime.GOARCH {
plog.Warningf("running etcd on unsupported architecture %q since ETCD_UNSUPPORTED_ARCH is set", env)
fmt.Printf("running etcd on unsupported architecture %q since ETCD_UNSUPPORTED_ARCH is set\n", env)
return
}
plog.Errorf("etcd on unsupported platform without ETCD_UNSUPPORTED_ARCH=%s set.", runtime.GOARCH)
fmt.Printf("etcd on unsupported platform without ETCD_UNSUPPORTED_ARCH=%s set\n", runtime.GOARCH)
os.Exit(1)
}

View File

@ -21,6 +21,8 @@ import (
"os"
"time"
"go.uber.org/zap"
"github.com/coreos/etcd/proxy/tcpproxy"
"github.com/spf13/cobra"
@ -79,16 +81,12 @@ func newGatewayStartCommand() *cobra.Command {
func stripSchema(eps []string) []string {
var endpoints []string
for _, ep := range eps {
if u, err := url.Parse(ep); err == nil && u.Host != "" {
ep = u.Host
}
endpoints = append(endpoints, ep)
}
return endpoints
}
@ -104,7 +102,8 @@ func startGateway(cmd *cobra.Command, args []string) {
for _, ep := range srvs.Endpoints {
h, p, err := net.SplitHostPort(ep)
if err != nil {
plog.Fatalf("error parsing endpoint %q", ep)
fmt.Printf("error parsing endpoint %q", ep)
os.Exit(1)
}
var port uint16
fmt.Sscanf(p, "%d", &port)
@ -113,23 +112,33 @@ func startGateway(cmd *cobra.Command, args []string) {
}
if len(srvs.Endpoints) == 0 {
plog.Fatalf("no endpoints found")
fmt.Println("no endpoints found")
os.Exit(1)
}
l, err := net.Listen("tcp", gatewayListenAddr)
var lg *zap.Logger
lg, err := zap.NewProduction()
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
var l net.Listener
l, err = net.Listen("tcp", gatewayListenAddr)
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
tp := tcpproxy.TCPProxy{
Logger: lg,
Listener: l,
Endpoints: srvs.SRVs,
MonitorInterval: getewayRetryDelay,
}
// At this point, etcd gateway listener is initialized
notifySystemd()
notifySystemd(lg)
tp.Run()
}
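For reference, the gateway shows the simplest instance of the new logger plumbing: build a production zap logger, exit on failure, hand it to the component being started, and pass it to notifySystemd. A minimal sketch under the assumption of a hypothetical Component in place of tcpproxy.TCPProxy:

package main

import (
	"fmt"
	"os"

	"go.uber.org/zap"
)

// Component stands in for tcpproxy.TCPProxy: anything accepting a
// *zap.Logger at construction time.
type Component struct {
	Logger *zap.Logger
}

func (c *Component) Run() { c.Logger.Info("component started") }

func main() {
	lg, err := zap.NewProduction()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer lg.Sync() // flush buffered entries on exit
	(&Component{Logger: lg}).Run()
}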

View File

@ -17,7 +17,7 @@ package etcdmain
import (
"context"
"fmt"
"io/ioutil"
"log"
"math"
"net"
"net/http"
@ -35,10 +35,10 @@ import (
"github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/pkg/debugutil"
"github.com/coreos/etcd/pkg/logutil"
"github.com/coreos/etcd/pkg/transport"
"github.com/coreos/etcd/proxy/grpcproxy"
"github.com/coreos/pkg/capnslog"
grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
"github.com/soheilhy/cmux"
"github.com/spf13/cobra"
@ -148,61 +148,75 @@ func newGRPCProxyStartCommand() *cobra.Command {
func startGRPCProxy(cmd *cobra.Command, args []string) {
checkArgs()
capnslog.SetGlobalLogLevel(capnslog.INFO)
if grpcProxyDebug {
capnslog.SetGlobalLogLevel(capnslog.DEBUG)
grpc.EnableTracing = true
// enable info, warning, error
grpclog.SetLoggerV2(grpclog.NewLoggerV2(os.Stderr, os.Stderr, os.Stderr))
} else {
// only discard info
grpclog.SetLoggerV2(grpclog.NewLoggerV2(ioutil.Discard, os.Stderr, os.Stderr))
lcfg := zap.Config{
Level: zap.NewAtomicLevelAt(zap.InfoLevel),
Development: false,
Sampling: &zap.SamplingConfig{
Initial: 100,
Thereafter: 100,
},
Encoding: "json",
EncoderConfig: zap.NewProductionEncoderConfig(),
// without output paths the built logger would silently discard entries
OutputPaths: []string{"stderr"},
ErrorOutputPaths: []string{"stderr"},
}
if grpcProxyDebug {
lcfg.Level = zap.NewAtomicLevelAt(zap.DebugLevel)
grpc.EnableTracing = true
}
lg, err := lcfg.Build()
if err != nil {
log.Fatal(err)
}
defer lg.Sync()
var gl grpclog.LoggerV2
gl, err = logutil.NewGRPCLoggerV2(lcfg)
if err != nil {
log.Fatal(err)
}
grpclog.SetLoggerV2(gl)
tlsinfo := newTLS(grpcProxyListenCA, grpcProxyListenCert, grpcProxyListenKey)
if tlsinfo == nil && grpcProxyListenAutoTLS {
host := []string{"https://" + grpcProxyListenAddr}
dir := filepath.Join(grpcProxyDataDir, "fixtures", "proxy")
lg, _ := zap.NewProduction()
if grpcProxyDebug {
lg = zap.NewExample()
}
autoTLS, err := transport.SelfCert(lg, dir, host)
if err != nil {
plog.Fatal(err)
log.Fatal(err)
}
tlsinfo = &autoTLS
}
if tlsinfo != nil {
plog.Infof("ServerTLS: %s", tlsinfo)
lg.Info("gRPC proxy server TLS", zap.String("tls-info", fmt.Sprintf("%+v", tlsinfo)))
}
m := mustListenCMux(tlsinfo)
m := mustListenCMux(lg, tlsinfo)
grpcl := m.Match(cmux.HTTP2())
defer func() {
grpcl.Close()
plog.Infof("stopping listening for grpc-proxy client requests on %s", grpcProxyListenAddr)
lg.Info("stopping listening gRPC proxy client requests", zap.String("address", grpcProxyListenAddr))
}()
client := mustNewClient()
client := mustNewClient(lg)
srvhttp, httpl := mustHTTPListener(m, tlsinfo, client)
srvhttp, httpl := mustHTTPListener(lg, m, tlsinfo, client)
errc := make(chan error)
go func() { errc <- newGRPCProxyServer(client).Serve(grpcl) }()
go func() { errc <- newGRPCProxyServer(lg, client).Serve(grpcl) }()
go func() { errc <- srvhttp.Serve(httpl) }()
go func() { errc <- m.Serve() }()
if len(grpcProxyMetricsListenAddr) > 0 {
mhttpl := mustMetricsListener(tlsinfo)
mhttpl := mustMetricsListener(lg, tlsinfo)
go func() {
mux := http.NewServeMux()
etcdhttp.HandlePrometheus(mux)
grpcproxy.HandleHealth(mux, client)
plog.Fatal(http.Serve(mhttpl, mux))
herr := http.Serve(mhttpl, mux)
lg.Fatal("gRPC proxy server serve returned", zap.Error(herr))
}()
}
// grpc-proxy is initialized, ready to serve
notifySystemd()
notifySystemd(lg)
fmt.Fprintln(os.Stderr, <-errc)
os.Exit(1)
@ -223,13 +237,13 @@ func checkArgs() {
}
}
func mustNewClient() *clientv3.Client {
func mustNewClient(lg *zap.Logger) *clientv3.Client {
srvs := discoverEndpoints(grpcProxyDNSCluster, grpcProxyCA, grpcProxyInsecureDiscovery)
eps := srvs.Endpoints
if len(eps) == 0 {
eps = grpcProxyEndpoints
}
cfg, err := newClientCfg(eps)
cfg, err := newClientCfg(lg, eps)
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
@ -246,7 +260,7 @@ func mustNewClient() *clientv3.Client {
return client
}
func newClientCfg(eps []string) (*clientv3.Config, error) {
func newClientCfg(lg *zap.Logger, eps []string) (*clientv3.Config, error) {
// set tls if any one tls option set
cfg := clientv3.Config{
Endpoints: eps,
@ -271,7 +285,7 @@ func newClientCfg(eps []string) (*clientv3.Config, error) {
}
clientTLS.InsecureSkipVerify = grpcProxyInsecureSkipTLSVerify
cfg.TLS = clientTLS
plog.Infof("ClientTLS: %s", tls)
lg.Info("gRPC proxy client TLS", zap.String("tls-info", fmt.Sprintf("%+v", tls)))
}
return &cfg, nil
}
@ -283,7 +297,7 @@ func newTLS(ca, cert, key string) *transport.TLSInfo {
return &transport.TLSInfo{TrustedCAFile: ca, CertFile: cert, KeyFile: key}
}
func mustListenCMux(tlsinfo *transport.TLSInfo) cmux.CMux {
func mustListenCMux(lg *zap.Logger, tlsinfo *transport.TLSInfo) cmux.CMux {
l, err := net.Listen("tcp", grpcProxyListenAddr)
if err != nil {
fmt.Fprintln(os.Stderr, err)
@ -297,25 +311,25 @@ func mustListenCMux(tlsinfo *transport.TLSInfo) cmux.CMux {
if tlsinfo != nil {
tlsinfo.CRLFile = grpcProxyListenCRL
if l, err = transport.NewTLSListener(l, tlsinfo); err != nil {
plog.Fatal(err)
lg.Fatal("failed to create TLS listener", zap.Error(err))
}
}
plog.Infof("listening for grpc-proxy client requests on %s", grpcProxyListenAddr)
lg.Info("listening for gRPC proxy client requests", zap.String("address", grpcProxyListenAddr))
return cmux.New(l)
}
func newGRPCProxyServer(client *clientv3.Client) *grpc.Server {
func newGRPCProxyServer(lg *zap.Logger, client *clientv3.Client) *grpc.Server {
if grpcProxyEnableOrdering {
vf := ordering.NewOrderViolationSwitchEndpointClosure(*client)
client.KV = ordering.NewKV(client.KV, vf)
plog.Infof("waiting for linearized read from cluster to recover ordering")
lg.Info("waiting for linearized read from cluster to recover ordering")
for {
_, err := client.KV.Get(context.TODO(), "_", clientv3.WithKeysOnly())
if err == nil {
break
}
plog.Warningf("ordering recovery failed, retrying in 1s (%v)", err)
lg.Warn("ordering recovery failed, retrying in 1s", zap.Error(err))
time.Sleep(time.Second)
}
}
@ -363,7 +377,7 @@ func newGRPCProxyServer(client *clientv3.Client) *grpc.Server {
return server
}
func mustHTTPListener(m cmux.CMux, tlsinfo *transport.TLSInfo, c *clientv3.Client) (*http.Server, net.Listener) {
func mustHTTPListener(lg *zap.Logger, m cmux.CMux, tlsinfo *transport.TLSInfo, c *clientv3.Client) (*http.Server, net.Listener) {
httpmux := http.NewServeMux()
httpmux.HandleFunc("/", http.NotFound)
etcdhttp.HandlePrometheus(httpmux)
@ -372,7 +386,7 @@ func mustHTTPListener(m cmux.CMux, tlsinfo *transport.TLSInfo, c *clientv3.Clien
for p, h := range debugutil.PProfHandlers() {
httpmux.Handle(p, h)
}
plog.Infof("pprof is enabled under %s", debugutil.HTTPPrefixPProf)
lg.Info("gRPC proxy enabled pprof", zap.String("path", debugutil.HTTPPrefixPProf))
}
srvhttp := &http.Server{Handler: httpmux}
@ -382,13 +396,13 @@ func mustHTTPListener(m cmux.CMux, tlsinfo *transport.TLSInfo, c *clientv3.Clien
srvTLS, err := tlsinfo.ServerConfig()
if err != nil {
plog.Fatalf("could not setup TLS (%v)", err)
lg.Fatal("failed to set up TLS", zap.Error(err))
}
srvhttp.TLSConfig = srvTLS
return srvhttp, m.Match(cmux.Any())
}
func mustMetricsListener(tlsinfo *transport.TLSInfo) net.Listener {
func mustMetricsListener(lg *zap.Logger, tlsinfo *transport.TLSInfo) net.Listener {
murl, err := url.Parse(grpcProxyMetricsListenAddr)
if err != nil {
fmt.Fprintf(os.Stderr, "cannot parse %q", grpcProxyMetricsListenAddr)
@ -399,6 +413,6 @@ func mustMetricsListener(tlsinfo *transport.TLSInfo) net.Listener {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
plog.Info("grpc-proxy: listening for metrics on ", murl.String())
lg.Info("gRPC proxy listening for metrics", zap.String("address", murl.String()))
return ml
}
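The grpc-proxy now builds its logger from an explicit zap.Config: production JSON encoding with sampling, and a level that flips to debug behind --debug. A runnable sketch of the same construction; the debug variable and output paths here are assumptions for illustration:

package main

import (
	"log"

	"go.uber.org/zap"
)

func main() {
	debug := false // stand-in for the --debug flag
	lcfg := zap.Config{
		Level:       zap.NewAtomicLevelAt(zap.InfoLevel),
		Development: false,
		Sampling: &zap.SamplingConfig{
			Initial:    100, // pass the first 100 identical entries per second
			Thereafter: 100, // then every 100th
		},
		Encoding:         "json",
		EncoderConfig:    zap.NewProductionEncoderConfig(),
		OutputPaths:      []string{"stderr"},
		ErrorOutputPaths: []string{"stderr"},
	}
	if debug {
		lcfg.Level = zap.NewAtomicLevelAt(zap.DebugLevel)
	}
	lg, err := lcfg.Build()
	if err != nil {
		log.Fatal(err)
	}
	defer lg.Sync()
	lg.Info("gRPC proxy logger ready")
}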

View File

@ -154,12 +154,16 @@ Profiling:
List of URLs to listen on for metrics.
Logging:
--debug 'false'
Enable debug-level logging for etcd.
--log-package-levels ''
Specify a particular log level for each etcd package (eg: 'etcdmain=CRITICAL,etcdserver=DEBUG').
--logger 'capnslog'
Specify 'zap' for structured logging or 'capnslog'.
--log-output 'default'
Specify 'stdout' or 'stderr' to skip journald logging even when running under systemd.
--debug 'false'
Enable debug-level logging for etcd.
Logging (to be deprecated in v3.5):
--log-package-levels ''
Specify a particular log level for each etcd package (eg: 'etcdmain=CRITICAL,etcdserver=DEBUG').
v2 Proxy (to be deprecated in v4):
--proxy 'off'

View File

@ -21,6 +21,7 @@ import (
"github.com/coreos/go-systemd/daemon"
systemdutil "github.com/coreos/go-systemd/util"
"go.uber.org/zap"
)
func Main() {
@ -46,15 +47,28 @@ func Main() {
startEtcdOrProxyV2()
}
func notifySystemd() {
func notifySystemd(lg *zap.Logger) {
if !systemdutil.IsRunningSystemd() {
return
}
if lg != nil {
lg.Info("host was booted with systemd, sends READY=1 message to init daemon")
}
sent, err := daemon.SdNotify(false, "READY=1")
if err != nil {
plog.Errorf("failed to notify systemd for readiness: %v", err)
if lg != nil {
lg.Error("failed to notify systemd for readiness", zap.Error(err))
} else {
plog.Errorf("failed to notify systemd for readiness: %v", err)
}
}
if !sent {
plog.Errorf("forgot to set Type=notify in systemd service file?")
if lg != nil {
lg.Warn("forgot to set Type=notify in systemd service file?")
} else {
plog.Errorf("forgot to set Type=notify in systemd service file?")
}
}
}
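Note the two failure modes notifySystemd distinguishes: SdNotify returns an error when the notification could not be delivered, and (false, nil) when NOTIFY_SOCKET is unset, which is why the Type=notify hint keys off sent rather than err. A minimal sketch using the same go-systemd package:

package main

import (
	"log"

	"github.com/coreos/go-systemd/daemon"
)

func main() {
	// false: keep NOTIFY_SOCKET in the environment after notifying
	sent, err := daemon.SdNotify(false, "READY=1")
	if err != nil {
		log.Printf("failed to notify systemd for readiness: %v", err)
	}
	if !sent {
		// (false, nil): not started by systemd with Type=notify
		log.Print("readiness notification not sent")
	}
}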

View File

@ -18,6 +18,7 @@ import (
"sync"
"github.com/coreos/etcd/version"
"go.uber.org/zap"
"github.com/coreos/go-semver/semver"
"github.com/coreos/pkg/capnslog"
@ -56,7 +57,7 @@ func init() {
}
// UpdateCapability updates the enabledMap when the cluster version increases.
func UpdateCapability(v *semver.Version) {
func UpdateCapability(lg *zap.Logger, v *semver.Version) {
if v == nil {
// if recovered but version was never set by cluster
return
@ -69,7 +70,15 @@ func UpdateCapability(v *semver.Version) {
curVersion = v
enabledMap = capabilityMaps[curVersion.String()]
enableMapMu.Unlock()
plog.Infof("enabled capabilities for version %s", version.Cluster(v.String()))
if lg != nil {
lg.Info(
"enabled capabilities for version",
zap.String("cluster-version", version.Cluster(v.String())),
)
} else {
plog.Infof("enabled capabilities for version %s", version.Cluster(v.String()))
}
}
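UpdateCapability swaps in the capability set keyed by the new cluster version under a mutex, so IsCapabilityEnabled readers always see a consistent map. A minimal sketch of that version-gated map; the names and version table are illustrative, not etcd's:

package main

import (
	"fmt"
	"sync"

	"github.com/coreos/go-semver/semver"
)

type Capability string

var (
	mu      sync.RWMutex
	enabled map[Capability]bool

	// capability sets keyed by cluster version
	byVersion = map[string]map[Capability]bool{
		"3.4.0": {"v3rpc": true, "lease": true},
	}
)

func UpdateCapability(v *semver.Version) {
	if v == nil {
		return // recovered, but version never set by the cluster
	}
	mu.Lock()
	enabled = byVersion[v.String()]
	mu.Unlock()
}

func IsCapabilityEnabled(c Capability) bool {
	mu.RLock()
	defer mu.RUnlock()
	return enabled[c]
}

func main() {
	UpdateCapability(semver.Must(semver.NewVersion("3.4.0")))
	fmt.Println(IsCapabilityEnabled("lease")) // true
}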
func IsCapabilityEnabled(c Capability) bool {

View File

@ -27,6 +27,7 @@ import (
"github.com/coreos/etcd/pkg/types"
"github.com/coreos/go-semver/semver"
"go.uber.org/zap"
)
type fakeStats struct{}
@ -36,12 +37,13 @@ func (s *fakeStats) LeaderStats() []byte { return nil }
func (s *fakeStats) StoreStats() []byte { return nil }
type v2v3Server struct {
lg *zap.Logger
c *clientv3.Client
store *v2v3Store
fakeStats
}
func NewServer(c *clientv3.Client, pfx string) etcdserver.ServerPeer {
func NewServer(lg *zap.Logger, c *clientv3.Client, pfx string) etcdserver.ServerPeer {
return &v2v3Server{lg: lg, c: c, store: newStore(c, pfx)}
}
@ -106,7 +108,7 @@ func (s *v2v3Server) Cluster() api.Cluster { return s }
func (s *v2v3Server) Alarms() []*pb.AlarmMember { return nil }
func (s *v2v3Server) Do(ctx context.Context, r pb.Request) (etcdserver.Response, error) {
applier := etcdserver.NewApplierV2(s.store, nil)
applier := etcdserver.NewApplierV2(s.lg, s.store, nil)
reqHandler := etcdserver.NewStoreRequestV2Handler(s.store, applier)
req := (*etcdserver.RequestV2)(&r)
resp, err := req.Handle(ctx, reqHandler)

View File

@ -22,15 +22,18 @@ import (
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/lease"
"go.uber.org/zap"
)
type LeaseServer struct {
lg *zap.Logger
hdr header
le etcdserver.Lessor
}
func NewLeaseServer(s *etcdserver.EtcdServer) pb.LeaseServer {
return &LeaseServer{le: s, hdr: newHeader(s)}
return &LeaseServer{lg: s.Cfg.Logger, le: s, hdr: newHeader(s)}
}
func (ls *LeaseServer) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
@ -108,9 +111,17 @@ func (ls *LeaseServer) leaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) erro
}
if err != nil {
if isClientCtxErr(stream.Context().Err(), err) {
plog.Debugf("failed to receive lease keepalive request from gRPC stream (%q)", err.Error())
if ls.lg != nil {
ls.lg.Debug("failed to receive lease keepalive request from gRPC stream", zap.Error(err))
} else {
plog.Debugf("failed to receive lease keepalive request from gRPC stream (%q)", err.Error())
}
} else {
plog.Warningf("failed to receive lease keepalive request from gRPC stream (%q)", err.Error())
if ls.lg != nil {
ls.lg.Warn("failed to receive lease keepalive request from gRPC stream", zap.Error(err))
} else {
plog.Warningf("failed to receive lease keepalive request from gRPC stream (%q)", err.Error())
}
}
return err
}
@ -138,9 +149,17 @@ func (ls *LeaseServer) leaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) erro
err = stream.Send(resp)
if err != nil {
if isClientCtxErr(stream.Context().Err(), err) {
plog.Debugf("failed to send lease keepalive response to gRPC stream (%q)", err.Error())
if ls.lg != nil {
ls.lg.Debug("failed to send lease keepalive response to gRPC stream", zap.Error(err))
} else {
plog.Debugf("failed to send lease keepalive response to gRPC stream (%q)", err.Error())
}
} else {
plog.Warningf("failed to send lease keepalive response to gRPC stream (%q)", err.Error())
if ls.lg != nil {
ls.lg.Warn("failed to send lease keepalive response to gRPC stream", zap.Error(err))
} else {
plog.Warningf("failed to send lease keepalive response to gRPC stream (%q)", err.Error())
}
}
return err
}
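The Debug-versus-Warn split above hinges on whether the failure came from the client's own context ending. A sketch of the idea behind isClientCtxErr; this mirrors the intent only, as etcd's actual check also inspects gRPC status codes:

package main

import (
	"context"
	"errors"
	"fmt"
)

// isClientCtxErr reports whether a stream failure is attributable to the
// client: its context was canceled or its deadline passed.
func isClientCtxErr(ctxErr, err error) bool {
	if ctxErr != nil {
		return true
	}
	return err == context.Canceled || err == context.DeadlineExceeded
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // the client hangs up
	fmt.Println(isClientCtxErr(ctx.Err(), errors.New("send failed"))) // true
}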

View File

@ -27,6 +27,8 @@ import (
"github.com/coreos/etcd/mvcc/backend"
"github.com/coreos/etcd/raft"
"github.com/coreos/etcd/version"
"go.uber.org/zap"
)
type KVGetter interface {
@ -54,6 +56,7 @@ type AuthGetter interface {
}
type maintenanceServer struct {
lg *zap.Logger
rg etcdserver.RaftStatusGetter
kg KVGetter
bg BackendGetter
@ -63,18 +66,30 @@ type maintenanceServer struct {
}
func NewMaintenanceServer(s *etcdserver.EtcdServer) pb.MaintenanceServer {
srv := &maintenanceServer{rg: s, kg: s, bg: s, a: s, lt: s, hdr: newHeader(s)}
srv := &maintenanceServer{lg: s.Cfg.Logger, rg: s, kg: s, bg: s, a: s, lt: s, hdr: newHeader(s)}
return &authMaintenanceServer{srv, s}
}
func (ms *maintenanceServer) Defragment(ctx context.Context, sr *pb.DefragmentRequest) (*pb.DefragmentResponse, error) {
plog.Noticef("starting to defragment the storage backend...")
if ms.lg != nil {
ms.lg.Info("starting defragment")
} else {
plog.Noticef("starting to defragment the storage backend...")
}
err := ms.bg.Backend().Defrag()
if err != nil {
plog.Errorf("failed to defragment the storage backend (%v)", err)
if ms.lg != nil {
ms.lg.Warn("failed to defragment", zap.Error(err))
} else {
plog.Errorf("failed to defragment the storage backend (%v)", err)
}
return nil, err
}
plog.Noticef("finished defragmenting the storage backend")
if ms.lg != nil {
ms.lg.Info("finished defragment")
} else {
plog.Noticef("finished defragmenting the storage backend")
}
return &pb.DefragmentResponse{}, nil
}
@ -87,7 +102,11 @@ func (ms *maintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance
go func() {
snap.WriteTo(pw)
if err := snap.Close(); err != nil {
plog.Errorf("error closing snapshot (%v)", err)
if ms.lg != nil {
ms.lg.Warn("failed to close snapshot", zap.Error(err))
} else {
plog.Errorf("error closing snapshot (%v)", err)
}
}
pw.Close()
}()

View File

@ -52,7 +52,7 @@ func (qa *quotaAlarmer) check(ctx context.Context, r interface{}) error {
func NewQuotaKVServer(s *etcdserver.EtcdServer) pb.KVServer {
return &quotaKVServer{
NewKVServer(s),
quotaAlarmer{etcdserver.NewBackendQuota(s), s, s.ID()},
quotaAlarmer{etcdserver.NewBackendQuota(s, "kv"), s, s.ID()},
}
}
@ -85,6 +85,6 @@ func (s *quotaLeaseServer) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequ
func NewQuotaLeaseServer(s *etcdserver.EtcdServer) pb.LeaseServer {
return &quotaLeaseServer{
NewLeaseServer(s),
quotaAlarmer{etcdserver.NewBackendQuota(s), s, s.ID()},
quotaAlarmer{etcdserver.NewBackendQuota(s, "lease"), s, s.ID()},
}
}
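NewBackendQuota now takes a name ("kv" and "lease" above, "v3-applier" later in this commit) so a quota violation can be attributed to the path that hit it. A minimal sketch of threading such a name through a quota type; the quota math here is illustrative only:

package main

import "fmt"

type backendQuota struct {
	name     string // which caller owns this quota, for log attribution
	maxBytes int64
}

func NewBackendQuota(name string, maxBytes int64) *backendQuota {
	return &backendQuota{name: name, maxBytes: maxBytes}
}

// Available reports whether a request of the given cost fits under quota.
func (q *backendQuota) Available(used, cost int64) bool {
	if used+cost > q.maxBytes {
		fmt.Printf("quota %q exceeded: used=%d cost=%d max=%d\n", q.name, used, cost, q.maxBytes)
		return false
	}
	return true
}

func main() {
	q := NewBackendQuota("kv", 2<<30)       // 2 GiB backend quota
	fmt.Println(q.Available(2<<30-10, 100)) // false: attributed to "kv"
}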

View File

@ -27,6 +27,8 @@ import (
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/mvcc"
"github.com/coreos/etcd/mvcc/mvccpb"
"go.uber.org/zap"
)
type watchServer struct {
@ -36,6 +38,8 @@ type watchServer struct {
watchable mvcc.WatchableKV
ag AuthGetter
lg *zap.Logger
}
func NewWatchServer(s *etcdserver.EtcdServer) pb.WatchServer {
@ -45,6 +49,7 @@ func NewWatchServer(s *etcdserver.EtcdServer) pb.WatchServer {
sg: s,
watchable: s.Watchable(),
ag: s,
lg: s.Cfg.Logger,
}
}
@ -114,6 +119,8 @@ type serverWatchStream struct {
wg sync.WaitGroup
ag AuthGetter
lg *zap.Logger
}
func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) {
@ -133,6 +140,8 @@ func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) {
closec: make(chan struct{}),
ag: ws.ag,
lg: ws.lg,
}
sws.wg.Add(1)
@ -149,9 +158,17 @@ func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) {
go func() {
if rerr := sws.recvLoop(); rerr != nil {
if isClientCtxErr(stream.Context().Err(), rerr) {
plog.Debugf("failed to receive watch request from gRPC stream (%q)", rerr.Error())
if sws.lg != nil {
sws.lg.Debug("failed to receive watch request from gRPC stream", zap.Error(err))
} else {
plog.Debugf("failed to receive watch request from gRPC stream (%q)", rerr.Error())
}
} else {
plog.Warningf("failed to receive watch request from gRPC stream (%q)", rerr.Error())
if sws.lg != nil {
sws.lg.Warn("failed to receive watch request from gRPC stream", zap.Error(err))
} else {
plog.Warningf("failed to receive watch request from gRPC stream (%q)", rerr.Error())
}
}
errc <- rerr
}
@ -355,9 +372,17 @@ func (sws *serverWatchStream) sendLoop() {
mvcc.ReportEventReceived(len(evs))
if err := sws.gRPCStream.Send(wr); err != nil {
if isClientCtxErr(sws.gRPCStream.Context().Err(), err) {
plog.Debugf("failed to send watch response to gRPC stream (%q)", err.Error())
if sws.lg != nil {
sws.lg.Debug("failed to send watch response to gRPC stream", zap.Error(err))
} else {
plog.Debugf("failed to send watch response to gRPC stream (%q)", err.Error())
}
} else {
plog.Warningf("failed to send watch response to gRPC stream (%q)", err.Error())
if sws.lg != nil {
sws.lg.Warn("failed to send watch response to gRPC stream", zap.Error(err))
} else {
plog.Warningf("failed to send watch response to gRPC stream (%q)", err.Error())
}
}
return
}
@ -376,9 +401,17 @@ func (sws *serverWatchStream) sendLoop() {
if err := sws.gRPCStream.Send(c); err != nil {
if isClientCtxErr(sws.gRPCStream.Context().Err(), err) {
plog.Debugf("failed to send watch control response to gRPC stream (%q)", err.Error())
if sws.lg != nil {
sws.lg.Debug("failed to send watch control response to gRPC stream", zap.Error(err))
} else {
plog.Debugf("failed to send watch control response to gRPC stream (%q)", err.Error())
}
} else {
plog.Warningf("failed to send watch control response to gRPC stream (%q)", err.Error())
if sws.lg != nil {
sws.lg.Warn("failed to send watch control response to gRPC stream", zap.Error(err))
} else {
plog.Warningf("failed to send watch control response to gRPC stream (%q)", err.Error())
}
}
return
}
@ -396,9 +429,17 @@ func (sws *serverWatchStream) sendLoop() {
mvcc.ReportEventReceived(len(v.Events))
if err := sws.gRPCStream.Send(v); err != nil {
if isClientCtxErr(sws.gRPCStream.Context().Err(), err) {
plog.Debugf("failed to send pending watch response to gRPC stream (%q)", err.Error())
if sws.lg != nil {
sws.lg.Debug("failed to send pending watch response to gRPC stream", zap.Error(err))
} else {
plog.Debugf("failed to send pending watch response to gRPC stream (%q)", err.Error())
}
} else {
plog.Warningf("failed to send pending watch response to gRPC stream (%q)", err.Error())
if sws.lg != nil {
sws.lg.Warn("failed to send pending watch response to gRPC stream", zap.Error(err))
} else {
plog.Warningf("failed to send pending watch response to gRPC stream (%q)", err.Error())
}
}
return
}
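The watch server copies its logger into every serverWatchStream it creates, so the per-stream send and receive goroutines can log without reaching back into the server. A minimal sketch of that constructor injection, with illustrative names:

package main

import "go.uber.org/zap"

type server struct{ lg *zap.Logger }

type stream struct{ lg *zap.Logger }

// newStream hands the server's logger to each stream it spawns.
func (s *server) newStream() *stream { return &stream{lg: s.lg} }

func main() {
	lg, _ := zap.NewProduction()
	defer lg.Sync()
	s := &server{lg: lg}
	st := s.newStream()
	st.lg.Info("stream created") // logs through the shared logger
}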

View File

@ -17,6 +17,7 @@ package etcdserver
import (
"bytes"
"context"
"fmt"
"sort"
"time"
@ -26,6 +27,7 @@ import (
"github.com/coreos/etcd/mvcc"
"github.com/coreos/etcd/mvcc/mvccpb"
"github.com/coreos/etcd/pkg/types"
"go.uber.org/zap"
"github.com/gogo/protobuf/proto"
)
@ -107,7 +109,7 @@ func (s *EtcdServer) newApplierV3() applierV3 {
}
func (a *applierV3backend) Apply(r *pb.InternalRaftRequest) *applyResult {
defer warnOfExpensiveRequest(time.Now(), r)
defer warnOfExpensiveRequest(a.s.getLogger(), time.Now(), r)
ar := &applyResult{}
@ -503,25 +505,39 @@ func (a *applierV3backend) applyTxn(txn mvcc.TxnWrite, rt *pb.TxnRequest, txnPat
if !txnPath[0] {
reqs = rt.Failure
}
lg := a.s.getLogger()
for i, req := range reqs {
respi := tresp.Responses[i].Response
switch tv := req.Request.(type) {
case *pb.RequestOp_RequestRange:
resp, err := a.Range(txn, tv.RequestRange)
if err != nil {
plog.Panicf("unexpected error during txn: %v", err)
if lg != nil {
lg.Panic("unexpected error during txn", zap.Error(err))
} else {
plog.Panicf("unexpected error during txn: %v", err)
}
}
respi.(*pb.ResponseOp_ResponseRange).ResponseRange = resp
case *pb.RequestOp_RequestPut:
resp, err := a.Put(txn, tv.RequestPut)
if err != nil {
plog.Panicf("unexpected error during txn: %v", err)
if lg != nil {
lg.Panic("unexpected error during txn", zap.Error(err))
} else {
plog.Panicf("unexpected error during txn: %v", err)
}
}
respi.(*pb.ResponseOp_ResponsePut).ResponsePut = resp
case *pb.RequestOp_RequestDeleteRange:
resp, err := a.DeleteRange(txn, tv.RequestDeleteRange)
if err != nil {
plog.Panicf("unexpected error during txn: %v", err)
if lg != nil {
lg.Panic("unexpected error during txn", zap.Error(err))
} else {
plog.Panicf("unexpected error during txn: %v", err)
}
}
respi.(*pb.ResponseOp_ResponseDeleteRange).ResponseDeleteRange = resp
case *pb.RequestOp_RequestTxn:
@ -569,6 +585,7 @@ func (a *applierV3backend) Alarm(ar *pb.AlarmRequest) (*pb.AlarmResponse, error)
resp := &pb.AlarmResponse{}
oldCount := len(a.s.alarmStore.Get(ar.Alarm))
lg := a.s.getLogger()
switch ar.Action {
case pb.AlarmRequest_GET:
resp.Alarms = a.s.alarmStore.Get(ar.Alarm)
@ -583,14 +600,22 @@ func (a *applierV3backend) Alarm(ar *pb.AlarmRequest) (*pb.AlarmResponse, error)
break
}
plog.Warningf("alarm %v raised by peer %s", m.Alarm, types.ID(m.MemberID))
if lg != nil {
lg.Warn("alarm raised", zap.String("alarm", m.Alarm.String()), zap.String("from", types.ID(m.MemberID).String()))
} else {
plog.Warningf("alarm %v raised by peer %s", m.Alarm, types.ID(m.MemberID))
}
switch m.Alarm {
case pb.AlarmType_CORRUPT:
a.s.applyV3 = newApplierV3Corrupt(a)
case pb.AlarmType_NOSPACE:
a.s.applyV3 = newApplierV3Capped(a)
default:
plog.Errorf("unimplemented alarm activation (%+v)", m)
if lg != nil {
lg.Warn("unimplemented alarm activation", zap.String("alarm", fmt.Sprintf("%+v", m)))
} else {
plog.Errorf("unimplemented alarm activation (%+v)", m)
}
}
case pb.AlarmRequest_DEACTIVATE:
m := a.s.alarmStore.Deactivate(types.ID(ar.MemberID), ar.Alarm)
@ -606,10 +631,18 @@ func (a *applierV3backend) Alarm(ar *pb.AlarmRequest) (*pb.AlarmResponse, error)
switch m.Alarm {
case pb.AlarmType_NOSPACE, pb.AlarmType_CORRUPT:
// TODO: check kv hash before deactivating CORRUPT?
plog.Infof("alarm disarmed %+v", ar)
if lg != nil {
lg.Warn("alarm disarmed", zap.String("alarm", m.Alarm.String()), zap.String("from", types.ID(m.MemberID).String()))
} else {
plog.Infof("alarm disarmed %+v", ar)
}
a.s.applyV3 = a.s.newApplierV3()
default:
plog.Errorf("unimplemented alarm deactivation (%+v)", m)
if lg != nil {
lg.Warn("unimplemented alarm deactivation", zap.String("alarm", fmt.Sprintf("%+v", m)))
} else {
plog.Errorf("unimplemented alarm deactivation (%+v)", m)
}
}
default:
return nil, nil
@ -773,7 +806,7 @@ type quotaApplierV3 struct {
}
func newQuotaApplierV3(s *EtcdServer, app applierV3) applierV3 {
return &quotaApplierV3{app, NewBackendQuota(s)}
return &quotaApplierV3{app, NewBackendQuota(s, "v3-applier")}
}
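warnOfExpensiveRequest is now handed the server's logger explicitly via a.s.getLogger(). A sketch of how such a deferred timing guard works; the threshold and names are assumptions, not etcd's exact values:

package main

import (
	"time"

	"go.uber.org/zap"
)

const warnApplyDuration = 100 * time.Millisecond // assumed threshold

// warnOfExpensiveRequest warns when the elapsed time since start exceeds
// the threshold. Because defer arguments are evaluated when the defer
// statement executes, time.Now() is captured at function entry.
func warnOfExpensiveRequest(lg *zap.Logger, start time.Time, req string) {
	if d := time.Since(start); d > warnApplyDuration && lg != nil {
		lg.Warn("request took too long", zap.Duration("took", d), zap.String("request", req))
	}
}

func apply(lg *zap.Logger) {
	defer warnOfExpensiveRequest(lg, time.Now(), "put key=foo")
	time.Sleep(150 * time.Millisecond) // stand-in for an expensive apply
}

func main() {
	lg, _ := zap.NewProduction()
	defer lg.Sync()
	apply(lg)
}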
func (a *quotaApplierV3) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error) {

View File

@ -25,6 +25,7 @@ import (
"github.com/coreos/etcd/pkg/pbutil"
"github.com/coreos/go-semver/semver"
"go.uber.org/zap"
)
// ApplierV2 is the interface for processing V2 raft messages
@ -36,11 +37,12 @@ type ApplierV2 interface {
Sync(r *RequestV2) Response
}
func NewApplierV2(s v2store.Store, c *membership.RaftCluster) ApplierV2 {
func NewApplierV2(lg *zap.Logger, s v2store.Store, c *membership.RaftCluster) ApplierV2 {
return &applierV2store{store: s, cluster: c}
}
type applierV2store struct {
lg *zap.Logger
store v2store.Store
cluster *membership.RaftCluster
}
@ -77,7 +79,11 @@ func (a *applierV2store) Put(r *RequestV2) Response {
id := membership.MustParseMemberIDFromKey(path.Dir(r.Path))
var attr membership.Attributes
if err := json.Unmarshal([]byte(r.Val), &attr); err != nil {
plog.Panicf("unmarshal %s should never fail: %v", r.Val, err)
if a.lg != nil {
a.lg.Panic("failed to unmarshal", zap.String("value", r.Val), zap.Error(err))
} else {
plog.Panicf("unmarshal %s should never fail: %v", r.Val, err)
}
}
if a.cluster != nil {
a.cluster.UpdateAttributes(id, attr)
@ -108,7 +114,7 @@ func (a *applierV2store) Sync(r *RequestV2) Response {
// applyV2Request interprets r as a call to v2store.X
// and returns a Response interpreted from v2store.Event
func (s *EtcdServer) applyV2Request(r *RequestV2) Response {
defer warnOfExpensiveRequest(time.Now(), r)
defer warnOfExpensiveRequest(s.getLogger(), time.Now(), r)
switch r.Method {
case "POST":

View File

@ -24,11 +24,13 @@ import (
"github.com/coreos/etcd/mvcc/backend"
"github.com/coreos/etcd/raft/raftpb"
"github.com/coreos/etcd/raftsnap"
"go.uber.org/zap"
)
func newBackend(cfg ServerConfig) backend.Backend {
bcfg := backend.DefaultBackendConfig()
bcfg.Path = cfg.backendPath()
bcfg.Logger = cfg.Logger
if cfg.QuotaBackendBytes > 0 && cfg.QuotaBackendBytes != DefaultQuotaBytes {
// permit 10% excess over quota for disarm
bcfg.MmapSize = uint64(cfg.QuotaBackendBytes + cfg.QuotaBackendBytes/10)
@ -51,17 +53,32 @@ func openSnapshotBackend(cfg ServerConfig, ss *raftsnap.Snapshotter, snapshot ra
// openBackend returns a backend using the current etcd db.
func openBackend(cfg ServerConfig) backend.Backend {
fn := cfg.backendPath()
beOpened := make(chan backend.Backend)
now, beOpened := time.Now(), make(chan backend.Backend)
go func() {
beOpened <- newBackend(cfg)
}()
select {
case be := <-beOpened:
if cfg.Logger != nil {
cfg.Logger.Info("opened backend db", zap.String("path", fn), zap.Duration("took", time.Since(now)))
}
return be
case <-time.After(10 * time.Second):
plog.Warningf("another etcd process is using %q and holds the file lock, or loading backend file is taking >10 seconds", fn)
plog.Warningf("waiting for it to exit before starting...")
if cfg.Logger != nil {
cfg.Logger.Warn(
"db file is flocked by another process, or loading backend file is taking too long",
zap.String("path", fn),
zap.Duration("took", time.Since(now)),
)
} else {
plog.Warningf("another etcd process is using %q and holds the file lock, or loading backend file is taking >10 seconds", fn)
plog.Warningf("waiting for it to exit before starting...")
}
}
return <-beOpened
}
@ -71,11 +88,11 @@ func openBackend(cfg ServerConfig) backend.Backend {
// case, replace the db with the snapshot db sent by the leader.
func recoverSnapshotBackend(cfg ServerConfig, oldbe backend.Backend, snapshot raftpb.Snapshot) (backend.Backend, error) {
var cIndex consistentIndex
kv := mvcc.New(oldbe, &lease.FakeLessor{}, &cIndex)
kv := mvcc.New(cfg.Logger, oldbe, &lease.FakeLessor{}, &cIndex)
defer kv.Close()
if snapshot.Metadata.Index <= kv.ConsistentIndex() {
return oldbe, nil
}
oldbe.Close()
return openSnapshotBackend(cfg, raftsnap.New(cfg.SnapDir()), snapshot)
return openSnapshotBackend(cfg, raftsnap.New(cfg.Logger, cfg.SnapDir()), snapshot)
}
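openBackend opens the db in a goroutine and races it against a 10-second timer: if another process holds the file lock, it logs a warning but keeps waiting rather than failing. A runnable sketch of the same select pattern, with a sleep standing in for the slow open:

package main

import (
	"fmt"
	"time"
)

func openSlow() string {
	time.Sleep(50 * time.Millisecond) // stand-in for opening a locked db file
	return "backend"
}

func main() {
	now, opened := time.Now(), make(chan string)
	go func() { opened <- openSlow() }()
	select {
	case be := <-opened:
		fmt.Printf("opened %s (took %v)\n", be, time.Since(now))
		return
	case <-time.After(10 * time.Second):
		fmt.Println("db file is flocked by another process, or loading is taking too long; waiting...")
	}
	fmt.Println("opened", <-opened) // block until the open finally completes
}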

View File

@ -27,12 +27,13 @@ import (
"github.com/coreos/etcd/version"
"github.com/coreos/go-semver/semver"
"go.uber.org/zap"
)
// isMemberBootstrapped tries to check if the given member has been bootstrapped
// in the given cluster.
func isMemberBootstrapped(cl *membership.RaftCluster, member string, rt http.RoundTripper, timeout time.Duration) bool {
rcl, err := getClusterFromRemotePeers(getRemotePeerURLs(cl, member), timeout, false, rt)
func isMemberBootstrapped(lg *zap.Logger, cl *membership.RaftCluster, member string, rt http.RoundTripper, timeout time.Duration) bool {
rcl, err := getClusterFromRemotePeers(lg, getRemotePeerURLs(cl, member), timeout, false, rt)
if err != nil {
return false
}
@ -54,21 +55,26 @@ func isMemberBootstrapped(cl *membership.RaftCluster, member string, rt http.Rou
// response, an error is returned.
// Each request has a 10-second timeout. Because the upper limit of TTL is 5s,
// 10 second is enough for building connection and finishing request.
func GetClusterFromRemotePeers(urls []string, rt http.RoundTripper) (*membership.RaftCluster, error) {
return getClusterFromRemotePeers(urls, 10*time.Second, true, rt)
func GetClusterFromRemotePeers(lg *zap.Logger, urls []string, rt http.RoundTripper) (*membership.RaftCluster, error) {
return getClusterFromRemotePeers(lg, urls, 10*time.Second, true, rt)
}
// If logerr is true, it prints out more error messages.
func getClusterFromRemotePeers(urls []string, timeout time.Duration, logerr bool, rt http.RoundTripper) (*membership.RaftCluster, error) {
func getClusterFromRemotePeers(lg *zap.Logger, urls []string, timeout time.Duration, logerr bool, rt http.RoundTripper) (*membership.RaftCluster, error) {
cc := &http.Client{
Transport: rt,
Timeout: timeout,
}
for _, u := range urls {
resp, err := cc.Get(u + "/members")
addr := u + "/members"
resp, err := cc.Get(addr)
if err != nil {
if logerr {
plog.Warningf("could not get cluster response from %s: %v", u, err)
if lg != nil {
lg.Warn("failed to get cluster response", zap.String("address", addr), zap.Error(err))
} else {
plog.Warningf("could not get cluster response from %s: %v", u, err)
}
}
continue
}
@ -76,21 +82,38 @@ func getClusterFromRemotePeers(urls []string, timeout time.Duration, logerr bool
resp.Body.Close()
if err != nil {
if logerr {
plog.Warningf("could not read the body of cluster response: %v", err)
if lg != nil {
lg.Warn("failed to read body of cluster response", zap.String("address", addr), zap.Error(err))
} else {
plog.Warningf("could not read the body of cluster response: %v", err)
}
}
continue
}
var membs []*membership.Member
if err = json.Unmarshal(b, &membs); err != nil {
if logerr {
plog.Warningf("could not unmarshal cluster response: %v", err)
if lg != nil {
lg.Warn("failed to unmarshal cluster response", zap.String("address", addr), zap.Error(err))
} else {
plog.Warningf("could not unmarshal cluster response: %v", err)
}
}
continue
}
id, err := types.IDFromString(resp.Header.Get("X-Etcd-Cluster-ID"))
if err != nil {
if logerr {
plog.Warningf("could not parse the cluster ID from cluster res: %v", err)
if lg != nil {
lg.Warn(
"failed to parse cluster ID",
zap.String("address", addr),
zap.String("header", resp.Header.Get("X-Etcd-Cluster-ID")),
zap.Error(err),
)
} else {
plog.Warningf("could not parse the cluster ID from cluster res: %v", err)
}
}
continue
}
@ -100,12 +123,11 @@ func getClusterFromRemotePeers(urls []string, timeout time.Duration, logerr bool
// if membership members are not present then the raft cluster formed will be
// an invalid empty cluster hence return failed to get raft cluster member(s) from the given urls error
if len(membs) > 0 {
return membership.NewClusterFromMembers("", id, membs), nil
return membership.NewClusterFromMembers(lg, "", id, membs), nil
}
return nil, fmt.Errorf("failed to get raft cluster member(s) from the given urls.")
return nil, fmt.Errorf("failed to get raft cluster member(s) from the given URLs")
}
return nil, fmt.Errorf("could not retrieve cluster information from the given urls")
return nil, fmt.Errorf("could not retrieve cluster information from the given URLs")
}
// getRemotePeerURLs returns peer urls of remote members in the cluster. The
@ -126,7 +148,7 @@ func getRemotePeerURLs(cl *membership.RaftCluster, local string) []string {
// The key of the returned map is the member's ID. The value of the returned map
// is the semver versions string, including server and cluster.
// If it fails to get the version of a member, the key will be nil.
func getVersions(cl *membership.RaftCluster, local types.ID, rt http.RoundTripper) map[string]*version.Versions {
func getVersions(lg *zap.Logger, cl *membership.RaftCluster, local types.ID, rt http.RoundTripper) map[string]*version.Versions {
members := cl.Members()
vers := make(map[string]*version.Versions)
for _, m := range members {
@ -138,9 +160,13 @@ func getVersions(cl *membership.RaftCluster, local types.ID, rt http.RoundTrippe
vers[m.ID.String()] = &version.Versions{Server: version.Version, Cluster: cv}
continue
}
ver, err := getVersion(m, rt)
ver, err := getVersion(lg, m, rt)
if err != nil {
plog.Warningf("cannot get the version of member %s (%v)", m.ID, err)
if lg != nil {
lg.Warn("failed to get version", zap.String("remote-member-id", m.ID.String()), zap.Error(err))
} else {
plog.Warningf("cannot get the version of member %s (%v)", m.ID, err)
}
vers[m.ID.String()] = nil
} else {
vers[m.ID.String()] = ver
@ -152,7 +178,7 @@ func getVersions(cl *membership.RaftCluster, local types.ID, rt http.RoundTrippe
// decideClusterVersion decides the cluster version based on the versions map.
// The returned version is the min server version in the map, or nil if the min
// version in unknown.
func decideClusterVersion(vers map[string]*version.Versions) *semver.Version {
func decideClusterVersion(lg *zap.Logger, vers map[string]*version.Versions) *semver.Version {
var cv *semver.Version
lv := semver.Must(semver.NewVersion(version.Version))
@ -162,12 +188,30 @@ func decideClusterVersion(vers map[string]*version.Versions) *semver.Version {
}
v, err := semver.NewVersion(ver.Server)
if err != nil {
plog.Errorf("cannot understand the version of member %s (%v)", mid, err)
if lg != nil {
lg.Warn(
"failed to parse server version of remote member",
zap.String("remote-member-id", mid),
zap.String("remote-member-version", ver.Server),
zap.Error(err),
)
} else {
plog.Errorf("cannot understand the version of member %s (%v)", mid, err)
}
return nil
}
if lv.LessThan(*v) {
plog.Warningf("the local etcd version %s is not up-to-date", lv.String())
plog.Warningf("member %s has a higher version %s", mid, ver.Server)
if lg != nil {
lg.Warn(
"local etcd version is not up-to-date",
zap.String("local-member-version", lv.String()),
zap.String("remote-member-id", mid),
zap.String("remote-member-version", ver.Server),
)
} else {
plog.Warningf("the local etcd version %s is not up-to-date", lv.String())
plog.Warningf("member %s has a higher version %s", mid, ver.Server)
}
}
if cv == nil {
cv = v
@ -184,19 +228,18 @@ func decideClusterVersion(vers map[string]*version.Versions) *semver.Version {
// cluster version in the range of [MinClusterVersion, Version] and no known members has a cluster version
// out of the range.
// We set this rule since when the local member joins, another member might be offline.
func isCompatibleWithCluster(cl *membership.RaftCluster, local types.ID, rt http.RoundTripper) bool {
vers := getVersions(cl, local, rt)
func isCompatibleWithCluster(lg *zap.Logger, cl *membership.RaftCluster, local types.ID, rt http.RoundTripper) bool {
vers := getVersions(lg, cl, local, rt)
minV := semver.Must(semver.NewVersion(version.MinClusterVersion))
maxV := semver.Must(semver.NewVersion(version.Version))
maxV = &semver.Version{
Major: maxV.Major,
Minor: maxV.Minor,
}
return isCompatibleWithVers(vers, local, minV, maxV)
return isCompatibleWithVers(lg, vers, local, minV, maxV)
}
func isCompatibleWithVers(vers map[string]*version.Versions, local types.ID, minV, maxV *semver.Version) bool {
func isCompatibleWithVers(lg *zap.Logger, vers map[string]*version.Versions, local types.ID, minV, maxV *semver.Version) bool {
var ok bool
for id, v := range vers {
// ignore comparison with local version
@ -208,15 +251,42 @@ func isCompatibleWithVers(vers map[string]*version.Versions, local types.ID, min
}
clusterv, err := semver.NewVersion(v.Cluster)
if err != nil {
plog.Errorf("cannot understand the cluster version of member %s (%v)", id, err)
if lg != nil {
lg.Warn(
"failed to parse cluster version of remote member",
zap.String("remote-member-id", id),
zap.String("remote-member-cluster-version", v.Cluster),
zap.Error(err),
)
} else {
plog.Errorf("cannot understand the cluster version of member %s (%v)", id, err)
}
continue
}
if clusterv.LessThan(*minV) {
plog.Warningf("the running cluster version(%v) is lower than the minimal cluster version(%v) supported", clusterv.String(), minV.String())
if lg != nil {
lg.Warn(
"cluster version of remote member is not compatible; too low",
zap.String("remote-member-id", id),
zap.String("remote-member-cluster-version", clusterv.String()),
zap.String("minimum-cluster-version-supported", minV.String()),
)
} else {
plog.Warningf("the running cluster version(%v) is lower than the minimal cluster version(%v) supported", clusterv.String(), minV.String())
}
return false
}
if maxV.LessThan(*clusterv) {
plog.Warningf("the running cluster version(%v) is higher than the maximum cluster version(%v) supported", clusterv.String(), maxV.String())
if lg != nil {
lg.Warn(
"cluster version of remote member is not compatible; too high",
zap.String("remote-member-id", id),
zap.String("remote-member-cluster-version", clusterv.String()),
zap.String("minimum-cluster-version-supported", minV.String()),
)
} else {
plog.Warningf("the running cluster version(%v) is higher than the maximum cluster version(%v) supported", clusterv.String(), maxV.String())
}
return false
}
ok = true
@ -226,7 +296,7 @@ func isCompatibleWithVers(vers map[string]*version.Versions, local types.ID, min
// getVersion returns the Versions of the given member via its
// peerURLs. Returns the last error if it fails to get the version.
func getVersion(m *membership.Member, rt http.RoundTripper) (*version.Versions, error) {
func getVersion(lg *zap.Logger, m *membership.Member, rt http.RoundTripper) (*version.Versions, error) {
cc := &http.Client{
Transport: rt,
}
@ -236,21 +306,49 @@ func getVersion(m *membership.Member, rt http.RoundTripper) (*version.Versions,
)
for _, u := range m.PeerURLs {
resp, err = cc.Get(u + "/version")
addr := u + "/version"
resp, err = cc.Get(addr)
if err != nil {
plog.Warningf("failed to reach the peerURL(%s) of member %s (%v)", u, m.ID, err)
if lg != nil {
lg.Warn(
"failed to reach the peer URL",
zap.String("address", addr),
zap.String("remote-member-id", m.ID.String()),
zap.Error(err),
)
} else {
plog.Warningf("failed to reach the peerURL(%s) of member %s (%v)", u, m.ID, err)
}
continue
}
var b []byte
b, err = ioutil.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
plog.Warningf("failed to read out the response body from the peerURL(%s) of member %s (%v)", u, m.ID, err)
if lg != nil {
lg.Warn(
"failed to read body of response",
zap.String("address", addr),
zap.String("remote-member-id", m.ID.String()),
zap.Error(err),
)
} else {
plog.Warningf("failed to read out the response body from the peerURL(%s) of member %s (%v)", u, m.ID, err)
}
continue
}
var vers version.Versions
if err = json.Unmarshal(b, &vers); err != nil {
plog.Warningf("failed to unmarshal the response body got from the peerURL(%s) of member %s (%v)", u, m.ID, err)
if lg != nil {
lg.Warn(
"failed to unmarshal response",
zap.String("address", addr),
zap.String("remote-member-id", m.ID.String()),
zap.Error(err),
)
} else {
plog.Warningf("failed to unmarshal the response body got from the peerURL(%s) of member %s (%v)", u, m.ID, err)
}
continue
}
return &vers, nil

View File

@ -22,8 +22,11 @@ import (
"github.com/coreos/etcd/version"
"github.com/coreos/go-semver/semver"
"go.uber.org/zap"
)
var testLogger = zap.NewExample()
func TestDecideClusterVersion(t *testing.T) {
tests := []struct {
vers map[string]*version.Versions
@ -53,7 +56,7 @@ func TestDecideClusterVersion(t *testing.T) {
}
for i, tt := range tests {
dver := decideClusterVersion(tt.vers)
dver := decideClusterVersion(testLogger, tt.vers)
if !reflect.DeepEqual(dver, tt.wdver) {
t.Errorf("#%d: ver = %+v, want %+v", i, dver, tt.wdver)
}
@ -124,7 +127,7 @@ func TestIsCompatibleWithVers(t *testing.T) {
}
for i, tt := range tests {
ok := isCompatibleWithVers(tt.vers, tt.local, tt.minV, tt.maxV)
ok := isCompatibleWithVers(testLogger, tt.vers, tt.local, tt.minV, tt.maxV)
if ok != tt.wok {
t.Errorf("#%d: ok = %+v, want %+v", i, ok, tt.wok)
}

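These tests thread a shared `testLogger` built with `zap.NewExample` through the updated signatures. `NewExample` suits tests because it writes deterministic JSON to stdout with no timestamps; a quick illustration:

```go
package main

import "go.uber.org/zap"

func main() {
	lg := zap.NewExample()
	lg.Info("decided cluster version", zap.String("cluster-version", "3.4"))
	// prints: {"level":"info","msg":"decided cluster version","cluster-version":"3.4"}
}
```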

@ -25,6 +25,8 @@ import (
"github.com/coreos/etcd/pkg/netutil"
"github.com/coreos/etcd/pkg/transport"
"github.com/coreos/etcd/pkg/types"
"go.uber.org/zap"
)
// ServerConfig holds the configuration of etcd as taken from the command line or discovery.
@ -80,6 +82,12 @@ type ServerConfig struct {
// PreVote is true to enable Raft Pre-Vote.
PreVote bool
// Logger logs server-side operations.
// If not nil, it disables "capnslog" and uses the given logger.
Logger *zap.Logger
// LoggerConfig is server logger configuration for Raft logger.
LoggerConfig zap.Config
Debug bool
ForceNewCluster bool
@ -214,28 +222,68 @@ func (c *ServerConfig) PrintWithInitial() { c.print(true) }
func (c *ServerConfig) Print() { c.print(false) }
func (c *ServerConfig) print(initial bool) {
plog.Infof("name = %s", c.Name)
if c.ForceNewCluster {
plog.Infof("force new cluster")
}
plog.Infof("data dir = %s", c.DataDir)
plog.Infof("member dir = %s", c.MemberDir())
if c.DedicatedWALDir != "" {
plog.Infof("dedicated WAL dir = %s", c.DedicatedWALDir)
}
plog.Infof("heartbeat = %dms", c.TickMs)
plog.Infof("election = %dms", c.ElectionTicks*int(c.TickMs))
plog.Infof("snapshot count = %d", c.SnapCount)
if len(c.DiscoveryURL) != 0 {
plog.Infof("discovery URL= %s", c.DiscoveryURL)
if len(c.DiscoveryProxy) != 0 {
plog.Infof("discovery proxy = %s", c.DiscoveryProxy)
// TODO: remove this after dropping "capnslog"
if c.Logger == nil {
plog.Infof("name = %s", c.Name)
if c.ForceNewCluster {
plog.Infof("force new cluster")
}
}
plog.Infof("advertise client URLs = %s", c.ClientURLs)
if initial {
plog.Infof("initial advertise peer URLs = %s", c.PeerURLs)
plog.Infof("initial cluster = %s", c.InitialPeerURLsMap)
plog.Infof("data dir = %s", c.DataDir)
plog.Infof("member dir = %s", c.MemberDir())
if c.DedicatedWALDir != "" {
plog.Infof("dedicated WAL dir = %s", c.DedicatedWALDir)
}
plog.Infof("heartbeat = %dms", c.TickMs)
plog.Infof("election = %dms", c.ElectionTicks*int(c.TickMs))
plog.Infof("snapshot count = %d", c.SnapCount)
if len(c.DiscoveryURL) != 0 {
plog.Infof("discovery URL= %s", c.DiscoveryURL)
if len(c.DiscoveryProxy) != 0 {
plog.Infof("discovery proxy = %s", c.DiscoveryProxy)
}
}
plog.Infof("advertise client URLs = %s", c.ClientURLs)
if initial {
plog.Infof("initial advertise peer URLs = %s", c.PeerURLs)
plog.Infof("initial cluster = %s", c.InitialPeerURLsMap)
}
} else {
caddrs := make([]string, len(c.ClientURLs))
for i := range c.ClientURLs {
caddrs[i] = c.ClientURLs[i].String()
}
paddrs := make([]string, len(c.PeerURLs))
for i := range c.PeerURLs {
paddrs[i] = c.PeerURLs[i].String()
}
state := "new"
if !c.NewCluster {
state = "existing"
}
c.Logger.Info(
"server starting",
zap.String("name", c.Name),
zap.String("data-dir", c.DataDir),
zap.String("member-dir", c.MemberDir()),
zap.String("dedicated-wal-dir", c.DedicatedWALDir),
zap.Bool("force-new-cluster", c.ForceNewCluster),
zap.Uint("heartbeat-tick-ms", c.TickMs),
zap.String("heartbeat-interval", fmt.Sprintf("%v", time.Duration(c.TickMs)*time.Millisecond)),
zap.Int("election-tick-ms", c.ElectionTicks),
zap.String("election-timeout", fmt.Sprintf("%v", time.Duration(c.ElectionTicks*int(c.TickMs))*time.Millisecond)),
zap.Uint64("snapshot-count", c.SnapCount),
zap.Strings("advertise-client-urls", caddrs),
zap.Strings("initial-advertise-peer-urls", paddrs),
zap.Bool("initial", initial),
zap.String("initial-cluster", c.InitialPeerURLsMap.String()),
zap.String("initial-cluster-state", state),
zap.String("initial-cluster-token", c.InitialClusterToken),
zap.Bool("pre-vote", c.PreVote),
zap.Bool("initial-corrupt-check", c.InitialCorruptCheck),
zap.Duration("corrupt-check-time", c.CorruptCheckTime),
zap.String("discovery-url", c.DiscoveryURL),
zap.String("discovery-proxy", c.DiscoveryProxy),
)
}
}

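The structured branch above derives human-readable durations from the raw tick counts; this is the same arithmetic behind the "heartbeat-interval" and "election-timeout" fields. A small standalone check, with the values as illustrative defaults:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	tickMs := uint(100) // milliseconds per heartbeat tick
	electionTicks := 10 // ticks per election timeout

	heartbeat := time.Duration(tickMs) * time.Millisecond
	election := time.Duration(electionTicks*int(tickMs)) * time.Millisecond
	fmt.Println(heartbeat, election) // 100ms 1s
}
```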

@ -19,6 +19,8 @@ import (
"testing"
"github.com/coreos/etcd/pkg/types"
"go.uber.org/zap"
)
func mustNewURLs(t *testing.T, urls []string) []url.URL {
@ -37,6 +39,7 @@ func TestConfigVerifyBootstrapWithoutClusterAndDiscoveryURLFail(t *testing.T) {
Name: "node1",
DiscoveryURL: "",
InitialPeerURLsMap: types.URLsMap{},
Logger: zap.NewExample(),
}
if err := c.VerifyBootstrap(); err == nil {
t.Errorf("err = nil, want not nil")
@ -54,6 +57,7 @@ func TestConfigVerifyExistingWithDiscoveryURLFail(t *testing.T) {
PeerURLs: mustNewURLs(t, []string{"http://127.0.0.1:2380"}),
InitialPeerURLsMap: cluster,
NewCluster: false,
Logger: zap.NewExample(),
}
if err := c.VerifyJoinExisting(); err == nil {
t.Errorf("err = nil, want not nil")
@ -141,6 +145,7 @@ func TestConfigVerifyLocalMember(t *testing.T) {
cfg := ServerConfig{
Name: "node1",
InitialPeerURLsMap: cluster,
Logger: zap.NewExample(),
}
if tt.apurls != nil {
cfg.PeerURLs = mustNewURLs(t, tt.apurls)
@ -165,6 +170,7 @@ func TestSnapDir(t *testing.T) {
for dd, w := range tests {
cfg := ServerConfig{
DataDir: dd,
Logger: zap.NewExample(),
}
if g := cfg.SnapDir(); g != w {
t.Errorf("DataDir=%q: SnapDir()=%q, want=%q", dd, g, w)
@ -180,6 +186,7 @@ func TestWALDir(t *testing.T) {
for dd, w := range tests {
cfg := ServerConfig{
DataDir: dd,
Logger: zap.NewExample(),
}
if g := cfg.WALDir(); g != w {
t.Errorf("DataDir=%q: WALDir()=%q, want=%q", dd, g, w)
@ -196,6 +203,7 @@ func TestShouldDiscover(t *testing.T) {
for durl, w := range tests {
cfg := ServerConfig{
DiscoveryURL: durl,
Logger: zap.NewExample(),
}
if g := cfg.ShouldDiscover(); g != w {
t.Errorf("durl=%q: ShouldDiscover()=%t, want=%t", durl, g, w)

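The tests above populate `InitialPeerURLsMap` from a `types.URLsMap`. Assuming the existing `pkg/types` helpers, the map can be built from the familiar initial-cluster string:

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/pkg/types"
)

func main() {
	// "name=url" pairs, comma-separated, as in --initial-cluster.
	cluster, err := types.NewURLsMap("node1=http://127.0.0.1:2380")
	if err != nil {
		panic(err)
	}
	fmt.Println(cluster.String()) // node1=http://127.0.0.1:2380
}
```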

@ -24,6 +24,9 @@ import (
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/mvcc"
"github.com/coreos/etcd/pkg/types"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
// CheckInitialHashKV compares initial hash values with its peers
@ -34,7 +37,18 @@ func (s *EtcdServer) CheckInitialHashKV() error {
return nil
}
plog.Infof("%s starting initial corruption check with timeout %v...", s.ID(), s.Cfg.ReqTimeout())
lg := s.getLogger()
if lg != nil {
lg.Info(
"starting initial corruption check",
zap.String("local-member-id", s.ID().String()),
zap.Duration("timeout", s.Cfg.ReqTimeout()),
)
} else {
plog.Infof("%s starting initial corruption check with timeout %v...", s.ID(), s.Cfg.ReqTimeout())
}
h, rev, crev, err := s.kv.HashByRev(0)
if err != nil {
return fmt.Errorf("%s failed to fetch hash (%v)", s.ID(), err)
@ -44,22 +58,70 @@ func (s *EtcdServer) CheckInitialHashKV() error {
for _, p := range peers {
if p.resp != nil {
peerID := types.ID(p.resp.Header.MemberId)
fields := []zapcore.Field{
zap.String("local-member-id", s.ID().String()),
zap.Int64("local-member-revision", rev),
zap.Int64("local-member-compact-revision", crev),
zap.Uint32("local-member-hash", h),
zap.String("remote-member-id", peerID.String()),
zap.Strings("remote-member-endpoints", p.eps),
zap.Int64("remote-member-revision", p.resp.Header.Revision),
zap.Int64("remote-member-compact-revision", p.resp.CompactRevision),
zap.Uint32("remote-member-hash", p.resp.Hash),
}
if h != p.resp.Hash {
if crev == p.resp.CompactRevision {
plog.Errorf("%s's hash %d != %s's hash %d (revision %d, peer revision %d, compact revision %d)", s.ID(), h, peerID, p.resp.Hash, rev, p.resp.Header.Revision, crev)
if lg != nil {
lg.Warn("found different hash values from remote peer", fields...)
} else {
plog.Errorf("%s's hash %d != %s's hash %d (revision %d, peer revision %d, compact revision %d)", s.ID(), h, peerID, p.resp.Hash, rev, p.resp.Header.Revision, crev)
}
mismatch++
} else {
plog.Warningf("%s cannot check hash of peer(%s): peer has a different compact revision %d (revision:%d)", s.ID(), peerID, p.resp.CompactRevision, rev)
if lg != nil {
lg.Warn("found different compact revision values from remote peer", fields...)
} else {
plog.Warningf("%s cannot check hash of peer(%s): peer has a different compact revision %d (revision:%d)", s.ID(), peerID, p.resp.CompactRevision, rev)
}
}
}
continue
}
if p.err != nil {
switch p.err {
case rpctypes.ErrFutureRev:
plog.Warningf("%s cannot check the hash of peer(%q) at revision %d: peer is lagging behind(%q)", s.ID(), p.eps, rev, p.err.Error())
if lg != nil {
lg.Warn(
"cannot fetch hash from slow remote peer",
zap.String("local-member-id", s.ID().String()),
zap.Int64("local-member-revision", rev),
zap.Int64("local-member-compact-revision", crev),
zap.Uint32("local-member-hash", h),
zap.String("remote-member-id", p.id.String()),
zap.Strings("remote-member-endpoints", p.eps),
zap.Error(p.err),
)
} else {
plog.Warningf("%s cannot check the hash of peer(%q) at revision %d: peer is lagging behind(%q)", s.ID(), p.eps, rev, p.err.Error())
}
case rpctypes.ErrCompacted:
plog.Warningf("%s cannot check the hash of peer(%q) at revision %d: local node is lagging behind(%q)", s.ID(), p.eps, rev, p.err.Error())
if lg != nil {
lg.Warn(
"cannot fetch hash from remote peer; local member is behind",
zap.String("local-member-id", s.ID().String()),
zap.Int64("local-member-revision", rev),
zap.Int64("local-member-compact-revision", crev),
zap.Uint32("local-member-hash", h),
zap.String("remote-member-id", p.id.String()),
zap.Strings("remote-member-endpoints", p.eps),
zap.Error(p.err),
)
} else {
plog.Warningf("%s cannot check the hash of peer(%q) at revision %d: local node is lagging behind(%q)", s.ID(), p.eps, rev, p.err.Error())
}
}
}
}
@ -67,7 +129,14 @@ func (s *EtcdServer) CheckInitialHashKV() error {
return fmt.Errorf("%s found data inconsistency with peers", s.ID())
}
plog.Infof("%s succeeded on initial corruption checking: no corruption", s.ID())
if lg != nil {
lg.Info(
"initial corruption checking passed; no corruption",
zap.String("local-member-id", s.ID().String()),
)
} else {
plog.Infof("%s succeeded on initial corruption checking: no corruption", s.ID())
}
return nil
}
@ -76,7 +145,18 @@ func (s *EtcdServer) monitorKVHash() {
if t == 0 {
return
}
plog.Infof("enabled corruption checking with %s interval", t)
lg := s.getLogger()
if lg != nil {
lg.Info(
"enabled corruption checking",
zap.String("local-member-id", s.ID().String()),
zap.Duration("interval", t),
)
} else {
plog.Infof("enabled corruption checking with %s interval", t)
}
for {
select {
case <-s.stopping:
@ -87,15 +167,21 @@ func (s *EtcdServer) monitorKVHash() {
continue
}
if err := s.checkHashKV(); err != nil {
plog.Debugf("check hash kv failed %v", err)
if lg != nil {
lg.Warn("failed to check hash KV", zap.Error(err))
} else {
plog.Debugf("check hash kv failed %v", err)
}
}
}
}
func (s *EtcdServer) checkHashKV() error {
lg := s.getLogger()
h, rev, crev, err := s.kv.HashByRev(0)
if err != nil {
plog.Fatalf("failed to hash kv store (%v)", err)
return err
}
peers := s.getPeerHashKVs(rev)
@ -108,7 +194,6 @@ func (s *EtcdServer) checkHashKV() error {
h2, rev2, crev2, err := s.kv.HashByRev(0)
if err != nil {
plog.Warningf("failed to hash kv store (%v)", err)
return err
}
@ -129,7 +214,19 @@ func (s *EtcdServer) checkHashKV() error {
}
if h2 != h && rev2 == rev && crev == crev2 {
plog.Warningf("mismatched hashes %d and %d for revision %d", h, h2, rev)
if lg != nil {
lg.Warn(
"found hash mismatch",
zap.Int64("revision-1", rev),
zap.Int64("compact-revision-1", crev),
zap.Uint32("hash-1", h),
zap.Int64("revision-2", rev2),
zap.Int64("compact-revision-2", crev2),
zap.Uint32("hash-2", h2),
)
} else {
plog.Warningf("mismatched hashes %d and %d for revision %d", h, h2, rev)
}
mismatch(uint64(s.ID()))
}
@ -141,34 +238,63 @@ func (s *EtcdServer) checkHashKV() error {
// leader expects follower's latest revision less than or equal to leader's
if p.resp.Header.Revision > rev2 {
plog.Warningf(
"revision %d from member %v, expected at most %d",
p.resp.Header.Revision,
types.ID(id),
rev2)
if lg != nil {
lg.Warn(
"revision from follower must be less than or equal to leader's",
zap.Int64("leader-revision", rev2),
zap.Int64("follower-revision", p.resp.Header.Revision),
zap.String("follower-peer-id", types.ID(id).String()),
)
} else {
plog.Warningf(
"revision %d from member %v, expected at most %d",
p.resp.Header.Revision,
types.ID(id),
rev2)
}
mismatch(id)
}
// leader expects follower's latest compact revision less than or equal to leader's
if p.resp.CompactRevision > crev2 {
plog.Warningf(
"compact revision %d from member %v, expected at most %d",
p.resp.CompactRevision,
types.ID(id),
crev2,
)
if lg != nil {
lg.Warn(
"compact revision from follower must be less than or equal to leader's",
zap.Int64("leader-compact-revision", crev2),
zap.Int64("follower-compact-revision", p.resp.CompactRevision),
zap.String("follower-peer-id", types.ID(id).String()),
)
} else {
plog.Warningf(
"compact revision %d from member %v, expected at most %d",
p.resp.CompactRevision,
types.ID(id),
crev2,
)
}
mismatch(id)
}
// if the follower's compact revision equals the leader's old one, hashes must match
if p.resp.CompactRevision == crev && p.resp.Hash != h {
plog.Warningf(
"hash %d at revision %d from member %v, expected hash %d",
p.resp.Hash,
rev,
types.ID(id),
h,
)
if lg != nil {
lg.Warn(
"same compact revision then hashes must match",
zap.Int64("leader-compact-revision", crev2),
zap.Uint32("leader-hash", h),
zap.Int64("follower-compact-revision", p.resp.CompactRevision),
zap.Uint32("follower-hash", p.resp.Hash),
zap.String("follower-peer-id", types.ID(id).String()),
)
} else {
plog.Warningf(
"hash %d at revision %d from member %v, expected hash %d",
p.resp.Hash,
rev,
types.ID(id),
h,
)
}
mismatch(id)
}
}
@ -176,33 +302,47 @@ func (s *EtcdServer) checkHashKV() error {
}
type peerHashKVResp struct {
id types.ID
eps []string
resp *clientv3.HashKVResponse
err error
eps []string
}
func (s *EtcdServer) getPeerHashKVs(rev int64) (resps []*peerHashKVResp) {
// TODO: handle the case when "s.cluster.Members" have not
// been populated (e.g. no snapshot to load from disk)
mbs := s.cluster.Members()
pURLs := make([][]string, len(mbs))
pss := make([]peerHashKVResp, len(mbs))
for _, m := range mbs {
if m.ID == s.ID() {
continue
}
pURLs = append(pURLs, m.PeerURLs)
pss = append(pss, peerHashKVResp{id: m.ID, eps: m.PeerURLs})
}
for _, purls := range pURLs {
if len(purls) == 0 {
lg := s.getLogger()
for _, p := range pss {
if len(p.eps) == 0 {
continue
}
cli, cerr := clientv3.New(clientv3.Config{
DialTimeout: s.Cfg.ReqTimeout(),
Endpoints: purls,
Endpoints: p.eps,
})
if cerr != nil {
plog.Warningf("%s failed to create client to peer %q for hash checking (%q)", s.ID(), purls, cerr.Error())
if lg != nil {
lg.Warn(
"failed to create client to peer URL",
zap.String("local-member-id", s.ID().String()),
zap.String("remote-member-id", p.id.String()),
zap.Strings("remote-member-endpoints", p.eps),
zap.Error(cerr),
)
} else {
plog.Warningf("%s failed to create client to peer %q for hash checking (%q)", s.ID(), p.eps, cerr.Error())
}
continue
}
@ -213,15 +353,25 @@ func (s *EtcdServer) getPeerHashKVs(rev int64) (resps []*peerHashKVResp) {
resp, cerr = cli.HashKV(ctx, c, rev)
cancel()
if cerr == nil {
resps = append(resps, &peerHashKVResp{resp: resp})
resps = append(resps, &peerHashKVResp{id: p.id, eps: p.eps, resp: resp, err: nil})
break
}
plog.Warningf("%s hash-kv error %q on peer %q with revision %d", s.ID(), cerr.Error(), c, rev)
if lg != nil {
lg.Warn(
"failed hash kv request",
zap.String("local-member-id", s.ID().String()),
zap.Int64("requested-revision", rev),
zap.String("remote-member-endpoint", c),
zap.Error(cerr),
)
} else {
plog.Warningf("%s hash-kv error %q on peer %q with revision %d", s.ID(), cerr.Error(), c, rev)
}
}
cli.Close()
if respsLen == len(resps) {
resps = append(resps, &peerHashKVResp{err: cerr, eps: purls})
resps = append(resps, &peerHashKVResp{id: p.id, eps: p.eps, resp: nil, err: cerr})
}
}
return resps

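A sketch of the comparison rule `CheckInitialHashKV` applies above: hashes are only meaningful when both members computed them over the same compact revision, so a compact-revision mismatch is inconclusive rather than evidence of corruption. The function and its return strings are illustrative, not etcd's API:

```go
package main

import "fmt"

func classify(localHash, peerHash uint32, localCRev, peerCRev int64) string {
	if localCRev != peerCRev {
		// peers compacted at different points; hashes cover different key ranges
		return "inconclusive: different compact revisions"
	}
	if localHash != peerHash {
		return "mismatch: possible corruption"
	}
	return "ok"
}

func main() {
	fmt.Println(classify(0xdead, 0xbeef, 5, 5)) // mismatch: possible corruption
	fmt.Println(classify(0xdead, 0xbeef, 5, 7)) // inconclusive: different compact revisions
}
```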

@ -36,10 +36,13 @@ import (
"github.com/coreos/etcd/version"
"github.com/coreos/go-semver/semver"
"go.uber.org/zap"
)
// RaftCluster is a list of Members that belong to the same raft cluster
type RaftCluster struct {
lg *zap.Logger
id types.ID
token string
@ -54,8 +57,8 @@ type RaftCluster struct {
removed map[types.ID]bool
}
func NewClusterFromURLsMap(token string, urlsmap types.URLsMap) (*RaftCluster, error) {
c := NewCluster(token)
func NewClusterFromURLsMap(lg *zap.Logger, token string, urlsmap types.URLsMap) (*RaftCluster, error) {
c := NewCluster(lg, token)
for name, urls := range urlsmap {
m := NewMember(name, urls, token, nil)
if _, ok := c.members[m.ID]; ok {
@ -70,8 +73,8 @@ func NewClusterFromURLsMap(token string, urlsmap types.URLsMap) (*RaftCluster, e
return c, nil
}
func NewClusterFromMembers(token string, id types.ID, membs []*Member) *RaftCluster {
c := NewCluster(token)
func NewClusterFromMembers(lg *zap.Logger, token string, id types.ID, membs []*Member) *RaftCluster {
c := NewCluster(lg, token)
c.id = id
for _, m := range membs {
c.members[m.ID] = m
@ -79,8 +82,9 @@ func NewClusterFromMembers(token string, id types.ID, membs []*Member) *RaftClus
return c
}
func NewCluster(token string) *RaftCluster {
func NewCluster(lg *zap.Logger, token string) *RaftCluster {
return &RaftCluster{
lg: lg,
token: token,
members: make(map[types.ID]*Member),
removed: make(map[types.ID]bool),
@ -115,7 +119,11 @@ func (c *RaftCluster) MemberByName(name string) *Member {
for _, m := range c.members {
if m.Name == name {
if memb != nil {
plog.Panicf("two members with the given name %q exist", name)
if c.lg != nil {
c.lg.Panic("two member with same name found", zap.String("name", name))
} else {
plog.Panicf("two members with the given name %q exist", name)
}
}
memb = m
}
@ -203,27 +211,43 @@ func (c *RaftCluster) SetBackend(be backend.Backend) {
mustCreateBackendBuckets(c.be)
}
func (c *RaftCluster) Recover(onSet func(*semver.Version)) {
func (c *RaftCluster) Recover(onSet func(*zap.Logger, *semver.Version)) {
c.Lock()
defer c.Unlock()
c.members, c.removed = membersFromStore(c.v2store)
c.version = clusterVersionFromStore(c.v2store)
mustDetectDowngrade(c.version)
onSet(c.version)
c.members, c.removed = membersFromStore(c.lg, c.v2store)
c.version = clusterVersionFromStore(c.lg, c.v2store)
mustDetectDowngrade(c.lg, c.version)
onSet(c.lg, c.version)
for _, m := range c.members {
plog.Infof("added member %s %v to cluster %s from store", m.ID, m.PeerURLs, c.id)
if c.lg != nil {
c.lg.Info(
"added member from store",
zap.String("cluster-id", c.id.String()),
zap.String("member-id", m.ID.String()),
zap.Strings("member-peer-urls", m.PeerURLs),
)
} else {
plog.Infof("added member %s %v to cluster %s from store", m.ID, m.PeerURLs, c.id)
}
}
if c.version != nil {
plog.Infof("set the cluster version to %v from store", version.Cluster(c.version.String()))
if c.lg != nil {
c.lg.Info(
"set cluster version from store",
zap.String("cluster-version", version.Cluster(c.version.String())),
)
} else {
plog.Infof("set the cluster version to %v from store", version.Cluster(c.version.String()))
}
}
}
// ValidateConfigurationChange takes a proposed ConfChange and
// ensures that it is still valid.
func (c *RaftCluster) ValidateConfigurationChange(cc raftpb.ConfChange) error {
members, removed := membersFromStore(c.v2store)
members, removed := membersFromStore(c.lg, c.v2store)
id := types.ID(cc.NodeID)
if removed[id] {
return ErrIDRemoved
@ -241,17 +265,23 @@ func (c *RaftCluster) ValidateConfigurationChange(cc raftpb.ConfChange) error {
}
m := new(Member)
if err := json.Unmarshal(cc.Context, m); err != nil {
plog.Panicf("unmarshal member should never fail: %v", err)
if c.lg != nil {
c.lg.Panic("failed to unmarshal member", zap.Error(err))
} else {
plog.Panicf("unmarshal member should never fail: %v", err)
}
}
for _, u := range m.PeerURLs {
if urls[u] {
return ErrPeerURLexists
}
}
case raftpb.ConfChangeRemoveNode:
if members[id] == nil {
return ErrIDNotFound
}
case raftpb.ConfChangeUpdateNode:
if members[id] == nil {
return ErrIDNotFound
@ -267,15 +297,24 @@ func (c *RaftCluster) ValidateConfigurationChange(cc raftpb.ConfChange) error {
}
m := new(Member)
if err := json.Unmarshal(cc.Context, m); err != nil {
plog.Panicf("unmarshal member should never fail: %v", err)
if c.lg != nil {
c.lg.Panic("failed to unmarshal member", zap.Error(err))
} else {
plog.Panicf("unmarshal member should never fail: %v", err)
}
}
for _, u := range m.PeerURLs {
if urls[u] {
return ErrPeerURLexists
}
}
default:
plog.Panicf("ConfChange type should be either AddNode, RemoveNode or UpdateNode")
if c.lg != nil {
c.lg.Panic("unknown ConfChange type", zap.String("type", cc.Type.String()))
} else {
plog.Panicf("ConfChange type should be either AddNode, RemoveNode or UpdateNode")
}
}
return nil
}
@ -295,7 +334,16 @@ func (c *RaftCluster) AddMember(m *Member) {
c.members[m.ID] = m
plog.Infof("added member %s %v to cluster %s", m.ID, m.PeerURLs, c.id)
if c.lg != nil {
c.lg.Info(
"added member",
zap.String("member-id", m.ID.String()),
zap.Strings("member-peer-urls", m.PeerURLs),
zap.String("cluster-id", c.id.String()),
)
} else {
plog.Infof("added member %s %v to cluster %s", m.ID, m.PeerURLs, c.id)
}
}
// RemoveMember removes a member from the store.
@ -313,7 +361,15 @@ func (c *RaftCluster) RemoveMember(id types.ID) {
delete(c.members, id)
c.removed[id] = true
plog.Infof("removed member %s from cluster %s", id, c.id)
if c.lg != nil {
c.lg.Info(
"removed member",
zap.String("member-id", id.String()),
zap.String("cluster-id", c.id.String()),
)
} else {
plog.Infof("removed member %s from cluster %s", id, c.id)
}
}
func (c *RaftCluster) UpdateAttributes(id types.ID, attr Attributes) {
@ -331,9 +387,18 @@ func (c *RaftCluster) UpdateAttributes(id types.ID, attr Attributes) {
}
_, ok := c.removed[id]
if !ok {
plog.Panicf("error updating attributes of unknown member %s", id)
if c.lg != nil {
c.lg.Panic("failed to update; member unknown", zap.String("member-id", id.String()))
} else {
plog.Panicf("error updating attributes of unknown member %s", id)
}
}
if c.lg != nil {
c.lg.Warn("skipped attributes update of removed member", zap.String("member-id", id.String()))
} else {
plog.Warningf("skipped updating attributes of removed member %s", id)
}
plog.Warningf("skipped updating attributes of removed member %s", id)
}
func (c *RaftCluster) UpdateRaftAttributes(id types.ID, raftAttr RaftAttributes) {
@ -348,7 +413,16 @@ func (c *RaftCluster) UpdateRaftAttributes(id types.ID, raftAttr RaftAttributes)
mustSaveMemberToBackend(c.be, c.members[id])
}
plog.Noticef("updated member %s %v in cluster %s", id, raftAttr.PeerURLs, c.id)
if c.lg != nil {
c.lg.Info(
"updated member",
zap.String("member-id", id.String()),
zap.Strings("member-peer-urls", raftAttr.PeerURLs),
zap.String("cluster-id", c.id.String()),
)
} else {
plog.Noticef("updated member %s %v in cluster %s", id, raftAttr.PeerURLs, c.id)
}
}
func (c *RaftCluster) Version() *semver.Version {
@ -360,23 +434,38 @@ func (c *RaftCluster) Version() *semver.Version {
return semver.Must(semver.NewVersion(c.version.String()))
}
func (c *RaftCluster) SetVersion(ver *semver.Version, onSet func(*semver.Version)) {
func (c *RaftCluster) SetVersion(ver *semver.Version, onSet func(*zap.Logger, *semver.Version)) {
c.Lock()
defer c.Unlock()
if c.version != nil {
plog.Noticef("updated the cluster version from %v to %v", version.Cluster(c.version.String()), version.Cluster(ver.String()))
if c.lg != nil {
c.lg.Info(
"updated cluster version",
zap.String("from", version.Cluster(c.version.String())),
zap.String("from", version.Cluster(ver.String())),
)
} else {
plog.Noticef("updated the cluster version from %v to %v", version.Cluster(c.version.String()), version.Cluster(ver.String()))
}
} else {
plog.Noticef("set the initial cluster version to %v", version.Cluster(ver.String()))
if c.lg != nil {
c.lg.Info(
"set initial cluster version",
zap.String("cluster-version", version.Cluster(ver.String())),
)
} else {
plog.Noticef("set the initial cluster version to %v", version.Cluster(ver.String()))
}
}
c.version = ver
mustDetectDowngrade(c.version)
mustDetectDowngrade(c.lg, c.version)
if c.v2store != nil {
mustSaveClusterVersionToStore(c.v2store, ver)
}
if c.be != nil {
mustSaveClusterVersionToBackend(c.be, ver)
}
onSet(ver)
onSet(c.lg, ver)
}
func (c *RaftCluster) IsReadyToAddNewMember() bool {
@ -393,14 +482,25 @@ func (c *RaftCluster) IsReadyToAddNewMember() bool {
if nstarted == 1 && nmembers == 2 {
// a case of adding a new node to 1-member cluster for restoring cluster data
// https://github.com/coreos/etcd/blob/master/Documentation/v2/admin_guide.md#restoring-the-cluster
plog.Debugf("The number of started member is 1. This cluster can accept add member request.")
if c.lg != nil {
c.lg.Debug("number of started member is 1; can accept add member request")
} else {
plog.Debugf("The number of started member is 1. This cluster can accept add member request.")
}
return true
}
nquorum := nmembers/2 + 1
if nstarted < nquorum {
plog.Warningf("Reject add member request: the number of started member (%d) will be less than the quorum number of the cluster (%d)", nstarted, nquorum)
if c.lg != nil {
c.lg.Warn(
"rejecting member add; started member will be less than quorum",
zap.Int("number-of-started-member", nstarted),
zap.Int("quorum", nquorum),
)
} else {
plog.Warningf("Reject add member request: the number of started member (%d) will be less than the quorum number of the cluster (%d)", nstarted, nquorum)
}
return false
}
@ -424,14 +524,22 @@ func (c *RaftCluster) IsReadyToRemoveMember(id uint64) bool {
nquorum := nmembers/2 + 1
if nstarted < nquorum {
plog.Warningf("Reject remove member request: the number of started member (%d) will be less than the quorum number of the cluster (%d)", nstarted, nquorum)
if c.lg != nil {
c.lg.Warn(
"rejecting member remove; started member will be less than quorum",
zap.Int("number-of-started-member", nstarted),
zap.Int("quorum", nquorum),
)
} else {
plog.Warningf("Reject remove member request: the number of started member (%d) will be less than the quorum number of the cluster (%d)", nstarted, nquorum)
}
return false
}
return true
}
func membersFromStore(st v2store.Store) (map[types.ID]*Member, map[types.ID]bool) {
func membersFromStore(lg *zap.Logger, st v2store.Store) (map[types.ID]*Member, map[types.ID]bool) {
members := make(map[types.ID]*Member)
removed := make(map[types.ID]bool)
e, err := st.Get(StoreMembersPrefix, true, true)
@ -439,13 +547,21 @@ func membersFromStore(st v2store.Store) (map[types.ID]*Member, map[types.ID]bool
if isKeyNotFound(err) {
return members, removed
}
plog.Panicf("get storeMembers should never fail: %v", err)
if lg != nil {
lg.Panic("failed to get members from store", zap.String("path", StoreMembersPrefix), zap.Error(err))
} else {
plog.Panicf("get storeMembers should never fail: %v", err)
}
}
for _, n := range e.Node.Nodes {
var m *Member
m, err = nodeToMember(n)
if err != nil {
plog.Panicf("nodeToMember should never fail: %v", err)
if lg != nil {
lg.Panic("failed to nodeToMember", zap.Error(err))
} else {
plog.Panicf("nodeToMember should never fail: %v", err)
}
}
members[m.ID] = m
}
@ -455,7 +571,15 @@ func membersFromStore(st v2store.Store) (map[types.ID]*Member, map[types.ID]bool
if isKeyNotFound(err) {
return members, removed
}
plog.Panicf("get storeRemovedMembers should never fail: %v", err)
if lg != nil {
lg.Panic(
"failed to get removed members from store",
zap.String("path", storeRemovedMembersPrefix),
zap.Error(err),
)
} else {
plog.Panicf("get storeRemovedMembers should never fail: %v", err)
}
}
for _, n := range e.Node.Nodes {
removed[MustParseMemberIDFromKey(n.Key)] = true
@ -463,13 +587,21 @@ func membersFromStore(st v2store.Store) (map[types.ID]*Member, map[types.ID]bool
return members, removed
}
func clusterVersionFromStore(st v2store.Store) *semver.Version {
func clusterVersionFromStore(lg *zap.Logger, st v2store.Store) *semver.Version {
e, err := st.Get(path.Join(storePrefix, "version"), false, false)
if err != nil {
if isKeyNotFound(err) {
return nil
}
plog.Panicf("unexpected error (%v) when getting cluster version from store", err)
if lg != nil {
lg.Panic(
"failed to get cluster version from store",
zap.String("path", path.Join(storePrefix, "version")),
zap.Error(err),
)
} else {
plog.Panicf("unexpected error (%v) when getting cluster version from store", err)
}
}
return semver.Must(semver.NewVersion(*e.Node.Value))
}
@ -502,11 +634,19 @@ func ValidateClusterAndAssignIDs(local *RaftCluster, existing *RaftCluster) erro
return nil
}
func mustDetectDowngrade(cv *semver.Version) {
func mustDetectDowngrade(lg *zap.Logger, cv *semver.Version) {
lv := semver.Must(semver.NewVersion(version.Version))
// only keep major.minor version for comparison against cluster version
lv = &semver.Version{Major: lv.Major, Minor: lv.Minor}
if cv != nil && lv.LessThan(*cv) {
plog.Fatalf("cluster cannot be downgraded (current version: %s is lower than determined cluster version: %s).", version.Version, version.Cluster(cv.String()))
if lg != nil {
lg.Fatal(
"invalid downgrade; server version is lower than determined cluster version",
zap.String("current-server-version", version.Version),
zap.String("determined-cluster-version", version.Cluster(cv.String())),
)
} else {
plog.Fatalf("cluster cannot be downgraded (current version: %s is lower than determined cluster version: %s).", version.Version, version.Cluster(cv.String()))
}
}
}

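`mustDetectDowngrade` above keeps only major.minor of the local server version before comparing it against the determined cluster version. A standalone sketch of that check with go-semver, using illustrative version strings:

```go
package main

import (
	"fmt"

	"github.com/coreos/go-semver/semver"
)

func main() {
	lv := semver.Must(semver.NewVersion("3.4.0"))
	// drop the patch component: only major.minor participates in the check
	lv = &semver.Version{Major: lv.Major, Minor: lv.Minor}

	cv := semver.Must(semver.NewVersion("3.5.0"))
	if lv.LessThan(*cv) {
		fmt.Println("invalid downgrade: local 3.4 is below cluster 3.5")
	}
}
```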

@ -26,6 +26,8 @@ import (
"github.com/coreos/etcd/pkg/testutil"
"github.com/coreos/etcd/pkg/types"
"github.com/coreos/etcd/raft/raftpb"
"go.uber.org/zap"
)
func TestClusterMember(t *testing.T) {
@ -274,7 +276,7 @@ func TestClusterValidateAndAssignIDs(t *testing.T) {
}
func TestClusterValidateConfigurationChange(t *testing.T) {
cl := NewCluster("")
cl := NewCluster(zap.NewExample(), "")
cl.SetStore(v2store.New())
for i := 1; i <= 4; i++ {
attr := RaftAttributes{PeerURLs: []string{fmt.Sprintf("http://127.0.0.1:%d", i)}}
@ -559,7 +561,7 @@ func TestNodeToMember(t *testing.T) {
}
func newTestCluster(membs []*Member) *RaftCluster {
c := &RaftCluster{members: make(map[types.ID]*Member), removed: make(map[types.ID]bool)}
c := &RaftCluster{lg: zap.NewExample(), members: make(map[types.ID]*Member), removed: make(map[types.ID]bool)}
for _, m := range membs {
c.members[m.ID] = m
}


@ -77,7 +77,7 @@ func NewMember(name string, peerURLs types.URLs, clusterName string, now *time.T
// It will panic if there is no PeerURLs available in Member.
func (m *Member) PickPeerURL() string {
if len(m.PeerURLs) == 0 {
plog.Panicf("member should always have some peer url")
panic("member should always have some peer url")
}
return m.PeerURLs[rand.Intn(len(m.PeerURLs))]
}

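`PickPeerURL` above now panics directly instead of going through plog; the behavior is simply "random element, or panic on empty". A self-contained sketch:

```go
package main

import (
	"fmt"
	"math/rand"
)

func pickPeerURL(peerURLs []string) string {
	if len(peerURLs) == 0 {
		// a Member must always carry at least one peer URL
		panic("member should always have some peer url")
	}
	return peerURLs[rand.Intn(len(peerURLs))]
}

func main() {
	fmt.Println(pickPeerURL([]string{"http://127.0.0.1:2380", "http://127.0.0.1:2381"}))
}
```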

@ -16,6 +16,9 @@ package etcdserver
import (
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
humanize "github.com/dustin/go-humanize"
"go.uber.org/zap"
)
const (
@ -57,18 +60,58 @@ const (
kvOverhead = 256
)
func NewBackendQuota(s *EtcdServer) Quota {
func NewBackendQuota(s *EtcdServer, name string) Quota {
lg := s.getLogger()
if s.Cfg.QuotaBackendBytes < 0 {
// disable quotas if negative
plog.Warningf("disabling backend quota")
if lg != nil {
lg.Info(
"disabled backend quota",
zap.String("quota-name", name),
zap.Int64("quota-size-bytes", s.Cfg.QuotaBackendBytes),
)
} else {
plog.Warningf("disabling backend quota")
}
return &passthroughQuota{}
}
if s.Cfg.QuotaBackendBytes == 0 {
// use default size if no quota size given
if lg != nil {
lg.Info(
"enabled backend quota with default value",
zap.String("quota-name", name),
zap.Int64("quota-size-bytes", DefaultQuotaBytes),
zap.String("quota-size", humanize.Bytes(uint64(DefaultQuotaBytes))),
)
}
return &backendQuota{s, DefaultQuotaBytes}
}
if s.Cfg.QuotaBackendBytes > MaxQuotaBytes {
plog.Warningf("backend quota %v exceeds maximum recommended quota %v", s.Cfg.QuotaBackendBytes, MaxQuotaBytes)
if lg != nil {
lg.Warn(
"quota exceeds the maximum value",
zap.String("quota-name", name),
zap.Int64("quota-size-bytes", s.Cfg.QuotaBackendBytes),
zap.String("quota-size", humanize.Bytes(uint64(s.Cfg.QuotaBackendBytes))),
zap.Int64("quota-maximum-size-bytes", MaxQuotaBytes),
zap.String("quota-maximum-size", humanize.Bytes(uint64(MaxQuotaBytes))),
)
} else {
plog.Warningf("backend quota %v exceeds maximum recommended quota %v", s.Cfg.QuotaBackendBytes, MaxQuotaBytes)
}
}
if lg != nil {
lg.Info(
"enabled backend quota",
zap.String("quota-name", name),
zap.Int64("quota-size-bytes", s.Cfg.QuotaBackendBytes),
zap.String("quota-size", humanize.Bytes(uint64(s.Cfg.QuotaBackendBytes))),
)
}
return &backendQuota{s, s.Cfg.QuotaBackendBytes}
}

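The quota branches above log both the raw byte count and a human-readable rendering; the latter comes from go-humanize. A quick check, assuming etcd's 2 GB default backend quota:

```go
package main

import (
	"fmt"

	humanize "github.com/dustin/go-humanize"
)

func main() {
	const defaultQuotaBytes = int64(2 * 1024 * 1024 * 1024) // 2 GB default
	fmt.Println(defaultQuotaBytes, humanize.Bytes(uint64(defaultQuotaBytes)))
	// prints: 2147483648 2.1 GB
}
```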

@ -17,6 +17,7 @@ package etcdserver
import (
"encoding/json"
"expvar"
"log"
"sort"
"sync"
"time"
@ -24,6 +25,7 @@ import (
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/etcdserver/membership"
"github.com/coreos/etcd/pkg/contention"
"github.com/coreos/etcd/pkg/logutil"
"github.com/coreos/etcd/pkg/pbutil"
"github.com/coreos/etcd/pkg/types"
"github.com/coreos/etcd/raft"
@ -33,6 +35,7 @@ import (
"github.com/coreos/etcd/wal/walpb"
"github.com/coreos/pkg/capnslog"
"go.uber.org/zap"
)
const (
@ -83,6 +86,8 @@ type apply struct {
}
type raftNode struct {
lg *zap.Logger
tickMu *sync.Mutex
raftNodeConfig
@ -105,6 +110,8 @@ type raftNode struct {
}
type raftNodeConfig struct {
lg *zap.Logger
// to check if msg receiver is removed from cluster
isIDRemoved func(id uint64) bool
raft.Node
@ -120,6 +127,7 @@ type raftNodeConfig struct {
func newRaftNode(cfg raftNodeConfig) *raftNode {
r := &raftNode{
lg: cfg.lg,
tickMu: new(sync.Mutex),
raftNodeConfig: cfg,
// set up contention detectors for raft heartbeat message.
@ -182,7 +190,11 @@ func (r *raftNode) start(rh *raftReadyHandler) {
select {
case r.readStateC <- rd.ReadStates[len(rd.ReadStates)-1]:
case <-time.After(internalTimeout):
plog.Warningf("timed out sending read state")
if r.lg != nil {
r.lg.Warn("timed out sending read state", zap.Duration("timeout", internalTimeout))
} else {
plog.Warningf("timed out sending read state")
}
case <-r.stopped:
return
}
@ -213,7 +225,11 @@ func (r *raftNode) start(rh *raftReadyHandler) {
// gofail: var raftBeforeSave struct{}
if err := r.storage.Save(rd.HardState, rd.Entries); err != nil {
plog.Fatalf("raft save state and entries error: %v", err)
if r.lg != nil {
r.lg.Fatal("failed to save Raft hard state and entries", zap.Error(err))
} else {
plog.Fatalf("raft save state and entries error: %v", err)
}
}
if !raft.IsEmptyHardState(rd.HardState) {
proposalsCommitted.Set(float64(rd.HardState.Commit))
@ -223,14 +239,22 @@ func (r *raftNode) start(rh *raftReadyHandler) {
if !raft.IsEmptySnap(rd.Snapshot) {
// gofail: var raftBeforeSaveSnap struct{}
if err := r.storage.SaveSnap(rd.Snapshot); err != nil {
plog.Fatalf("raft save snapshot error: %v", err)
if r.lg != nil {
r.lg.Fatal("failed to save Raft snapshot", zap.Error(err))
} else {
plog.Fatalf("raft save snapshot error: %v", err)
}
}
// etcdserver now claim the snapshot has been persisted onto the disk
notifyc <- struct{}{}
// gofail: var raftAfterSaveSnap struct{}
r.raftStorage.ApplySnapshot(rd.Snapshot)
plog.Infof("raft applied incoming snapshot at index %d", rd.Snapshot.Metadata.Index)
if r.lg != nil {
r.lg.Info("applied incoming Raft snapshot", zap.Uint64("snapshot-index", rd.Snapshot.Metadata.Index))
} else {
plog.Infof("raft applied incoming snapshot at index %d", rd.Snapshot.Metadata.Index)
}
// gofail: var raftAfterApplySnap struct{}
}
@ -327,8 +351,16 @@ func (r *raftNode) processMessages(ms []raftpb.Message) []raftpb.Message {
ok, exceed := r.td.Observe(ms[i].To)
if !ok {
// TODO: limit request rate.
plog.Warningf("failed to send out heartbeat on time (exceeded the %v timeout for %v)", r.heartbeat, exceed)
plog.Warningf("server is likely overloaded")
if r.lg != nil {
r.lg.Warn(
"heartbeat took too long to send out; server is overloaded, likely from slow disk",
zap.Duration("exceeded", exceed),
zap.Duration("heartbeat-interval", r.heartbeat),
)
} else {
plog.Warningf("failed to send out heartbeat on time (exceeded the %v timeout for %v)", r.heartbeat, exceed)
plog.Warningf("server is likely overloaded")
}
}
}
}
@ -349,7 +381,11 @@ func (r *raftNode) onStop() {
r.ticker.Stop()
r.transport.Stop()
if err := r.storage.Close(); err != nil {
plog.Panicf("raft close storage error: %v", err)
if r.lg != nil {
r.lg.Panic("failed to close Raft storage", zap.Error(err))
} else {
plog.Panicf("raft close storage error: %v", err)
}
}
close(r.done)
}
@ -384,19 +420,36 @@ func startNode(cfg ServerConfig, cl *membership.RaftCluster, ids []types.ID) (id
ClusterID: uint64(cl.ID()),
},
)
if w, err = wal.Create(cfg.WALDir(), metadata); err != nil {
plog.Fatalf("create wal error: %v", err)
if w, err = wal.Create(cfg.Logger, cfg.WALDir(), metadata); err != nil {
if cfg.Logger != nil {
cfg.Logger.Fatal("failed to create WAL", zap.Error(err))
} else {
plog.Fatalf("create wal error: %v", err)
}
}
peers := make([]raft.Peer, len(ids))
for i, id := range ids {
ctx, err := json.Marshal((*cl).Member(id))
var ctx []byte
ctx, err = json.Marshal((*cl).Member(id))
if err != nil {
plog.Panicf("marshal member should never fail: %v", err)
if cfg.Logger != nil {
cfg.Logger.Panic("failed to marshal member", zap.Error(err))
} else {
plog.Panicf("marshal member should never fail: %v", err)
}
}
peers[i] = raft.Peer{ID: uint64(id), Context: ctx}
}
id = member.ID
plog.Infof("starting member %s in cluster %s", id, cl.ID())
if cfg.Logger != nil {
cfg.Logger.Info(
"starting local member",
zap.String("local-member-id", id.String()),
zap.String("cluster-id", cl.ID().String()),
)
} else {
plog.Infof("starting member %s in cluster %s", id, cl.ID())
}
s = raft.NewMemoryStorage()
c := &raft.Config{
ID: uint64(id),
@ -408,6 +461,13 @@ func startNode(cfg ServerConfig, cl *membership.RaftCluster, ids []types.ID) (id
CheckQuorum: true,
PreVote: cfg.PreVote,
}
if cfg.Logger != nil {
// called after capnslog setting in "init" function
c.Logger, err = logutil.NewRaftLogger(cfg.LoggerConfig)
if err != nil {
log.Fatalf("cannot create raft logger %v", err)
}
}
n = raft.StartNode(c, peers)
raftStatusMu.Lock()
@ -421,10 +481,19 @@ func restartNode(cfg ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *member
if snapshot != nil {
walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
}
w, id, cid, st, ents := readWAL(cfg.WALDir(), walsnap)
w, id, cid, st, ents := readWAL(cfg.Logger, cfg.WALDir(), walsnap)
plog.Infof("restarting member %s in cluster %s at commit index %d", id, cid, st.Commit)
cl := membership.NewCluster("")
if cfg.Logger != nil {
cfg.Logger.Info(
"restarting local member",
zap.String("local-member-id", id.String()),
zap.String("cluster-id", cid.String()),
zap.Uint64("commit-index", st.Commit),
)
} else {
plog.Infof("restarting member %s in cluster %s at commit index %d", id, cid, st.Commit)
}
cl := membership.NewCluster(cfg.Logger, "")
cl.SetID(cid)
s := raft.NewMemoryStorage()
if snapshot != nil {
@ -442,6 +511,14 @@ func restartNode(cfg ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *member
CheckQuorum: true,
PreVote: cfg.PreVote,
}
if cfg.Logger != nil {
// called after capnslog setting in "init" function
var err error
c.Logger, err = logutil.NewRaftLogger(cfg.LoggerConfig)
if err != nil {
log.Fatalf("cannot create raft logger %v", err)
}
}
n := raft.RestartNode(c)
raftStatusMu.Lock()
@ -455,32 +532,61 @@ func restartAsStandaloneNode(cfg ServerConfig, snapshot *raftpb.Snapshot) (types
if snapshot != nil {
walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
}
w, id, cid, st, ents := readWAL(cfg.WALDir(), walsnap)
w, id, cid, st, ents := readWAL(cfg.Logger, cfg.WALDir(), walsnap)
// discard the previously uncommitted entries
for i, ent := range ents {
if ent.Index > st.Commit {
plog.Infof("discarding %d uncommitted WAL entries ", len(ents)-i)
if cfg.Logger != nil {
cfg.Logger.Info(
"discarding uncommitted WAL entries",
zap.Uint64("entry-index", ent.Index),
zap.Uint64("commit-index-from-wal", st.Commit),
zap.Int("number-of-discarded-entries", len(ents)-i),
)
} else {
plog.Infof("discarding %d uncommitted WAL entries ", len(ents)-i)
}
ents = ents[:i]
break
}
}
// force append the configuration change entries
toAppEnts := createConfigChangeEnts(getIDs(snapshot, ents), uint64(id), st.Term, st.Commit)
toAppEnts := createConfigChangeEnts(
cfg.Logger,
getIDs(cfg.Logger, snapshot, ents),
uint64(id),
st.Term,
st.Commit,
)
ents = append(ents, toAppEnts...)
// force commit newly appended entries
err := w.Save(raftpb.HardState{}, toAppEnts)
if err != nil {
plog.Fatalf("%v", err)
if cfg.Logger != nil {
cfg.Logger.Fatal("failed to save hard state and entries", zap.Error(err))
} else {
plog.Fatalf("%v", err)
}
}
if len(ents) != 0 {
st.Commit = ents[len(ents)-1].Index
}
plog.Printf("forcing restart of member %s in cluster %s at commit index %d", id, cid, st.Commit)
cl := membership.NewCluster("")
if cfg.Logger != nil {
cfg.Logger.Info(
"forcing restart member",
zap.String("local-member-id", id.String()),
zap.String("cluster-id", cid.String()),
zap.Uint64("commit-index", st.Commit),
)
} else {
plog.Printf("forcing restart of member %s in cluster %s at commit index %d", id, cid, st.Commit)
}
cl := membership.NewCluster(cfg.Logger, "")
cl.SetID(cid)
s := raft.NewMemoryStorage()
if snapshot != nil {
@ -498,6 +604,14 @@ func restartAsStandaloneNode(cfg ServerConfig, snapshot *raftpb.Snapshot) (types
CheckQuorum: true,
PreVote: cfg.PreVote,
}
if cfg.Logger != nil {
// called after capnslog setting in "init" function
c.Logger, err = logutil.NewRaftLogger(cfg.LoggerConfig)
if err != nil {
log.Fatalf("cannot create raft logger %v", err)
}
}
n := raft.RestartNode(c)
raftStatus = n.Status
return id, cl, n, s, w
@ -508,7 +622,7 @@ func restartAsStandaloneNode(cfg ServerConfig, snapshot *raftpb.Snapshot) (types
// ID-related entry:
// - ConfChangeAddNode, in which case the contained ID will be added into the set.
// - ConfChangeRemoveNode, in which case the contained ID will be removed from the set.
func getIDs(snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 {
func getIDs(lg *zap.Logger, snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 {
ids := make(map[uint64]bool)
if snap != nil {
for _, id := range snap.Metadata.ConfState.Nodes {
@ -529,7 +643,11 @@ func getIDs(snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 {
case raftpb.ConfChangeUpdateNode:
// do nothing
default:
plog.Panicf("ConfChange Type should be either ConfChangeAddNode or ConfChangeRemoveNode!")
if lg != nil {
lg.Panic("unknown ConfChange Type", zap.String("type", cc.Type.String()))
} else {
plog.Panicf("ConfChange Type should be either ConfChangeAddNode or ConfChangeRemoveNode!")
}
}
}
sids := make(types.Uint64Slice, 0, len(ids))
@ -545,7 +663,7 @@ func getIDs(snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 {
// `self` is _not_ removed, even if present in the set.
// If `self` is not inside the given ids, it creates a Raft entry to add a
// default member with the given `self`.
func createConfigChangeEnts(ids []uint64, self uint64, term, index uint64) []raftpb.Entry {
func createConfigChangeEnts(lg *zap.Logger, ids []uint64, self uint64, term, index uint64) []raftpb.Entry {
ents := make([]raftpb.Entry, 0)
next := index + 1
found := false
@ -574,7 +692,11 @@ func createConfigChangeEnts(ids []uint64, self uint64, term, index uint64) []raf
}
ctx, err := json.Marshal(m)
if err != nil {
plog.Panicf("marshal member should never fail: %v", err)
if lg != nil {
lg.Panic("failed to marshal member", zap.Error(err))
} else {
plog.Panicf("marshal member should never fail: %v", err)
}
}
cc := &raftpb.ConfChange{
Type: raftpb.ConfChangeAddNode,

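`getIDs` above folds a snapshot's ConfState plus subsequent ConfChange entries into a member-ID set and returns it sorted. A minimal stand-in using plain uint64s instead of raftpb types (the real code replays entries in log order; this sketch batches adds and removes for brevity):

```go
package main

import (
	"fmt"
	"sort"
)

// applyConfChanges replays add/remove operations over an initial ID set,
// the way getIDs walks snapshot ConfState nodes and ConfChange entries.
func applyConfChanges(initial, adds, removes []uint64) []uint64 {
	ids := make(map[uint64]bool)
	for _, id := range initial {
		ids[id] = true
	}
	for _, id := range adds {
		ids[id] = true
	}
	for _, id := range removes {
		delete(ids, id)
	}
	out := make([]uint64, 0, len(ids))
	for id := range ids {
		out = append(out, id)
	}
	sort.Slice(out, func(i, j int) bool { return out[i] < out[j] })
	return out
}

func main() {
	fmt.Println(applyConfChanges([]uint64{1, 2}, []uint64{3}, []uint64{2})) // [1 3]
}
```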

@ -17,6 +17,7 @@ package etcdserver
import (
"encoding/json"
"reflect"
"sync"
"testing"
"time"
@ -27,6 +28,8 @@ import (
"github.com/coreos/etcd/raft"
"github.com/coreos/etcd/raft/raftpb"
"github.com/coreos/etcd/rafthttp"
"go.uber.org/zap"
)
func TestGetIDs(t *testing.T) {
@ -64,7 +67,7 @@ func TestGetIDs(t *testing.T) {
if tt.confState != nil {
snap.Metadata.ConfState = *tt.confState
}
idSet := getIDs(&snap, tt.ents)
idSet := getIDs(testLogger, &snap, tt.ents)
if !reflect.DeepEqual(idSet, tt.widSet) {
t.Errorf("#%d: idset = %#v, want %#v", i, idSet, tt.widSet)
}
@ -144,7 +147,7 @@ func TestCreateConfigChangeEnts(t *testing.T) {
}
for i, tt := range tests {
gents := createConfigChangeEnts(tt.ids, tt.self, tt.term, tt.index)
gents := createConfigChangeEnts(testLogger, tt.ids, tt.self, tt.term, tt.index)
if !reflect.DeepEqual(gents, tt.wents) {
t.Errorf("#%d: ents = %v, want %v", i, gents, tt.wents)
}
@ -154,12 +157,13 @@ func TestCreateConfigChangeEnts(t *testing.T) {
func TestStopRaftWhenWaitingForApplyDone(t *testing.T) {
n := newNopReadyNode()
r := newRaftNode(raftNodeConfig{
lg: zap.NewExample(),
Node: n,
storage: mockstorage.NewStorageRecorder(""),
raftStorage: raft.NewMemoryStorage(),
transport: rafthttp.NewNopTransporter(),
})
srv := &EtcdServer{r: *r}
srv := &EtcdServer{lgMu: new(sync.RWMutex), lg: zap.NewExample(), r: *r}
srv.r.start(nil)
n.readyc <- raft.Ready{}
select {
@ -181,12 +185,13 @@ func TestConfgChangeBlocksApply(t *testing.T) {
n := newNopReadyNode()
r := newRaftNode(raftNodeConfig{
lg: zap.NewExample(),
Node: n,
storage: mockstorage.NewStorageRecorder(""),
raftStorage: raft.NewMemoryStorage(),
transport: rafthttp.NewNopTransporter(),
})
srv := &EtcdServer{r: *r}
srv := &EtcdServer{lgMu: new(sync.RWMutex), lg: zap.NewExample(), r: *r}
srv.r.start(&raftReadyHandler{
getLead: func() uint64 { return 0 },

File diff suppressed because it is too large

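The server tests below now seed every `EtcdServer` with an `lgMu`/`lg` pair. A sketch of the accessor pattern that implies, assuming the mutex exists so the logger can be read concurrently while it may be swapped; the type and method names here are illustrative, not etcd's exact implementation:

```go
package main

import (
	"sync"

	"go.uber.org/zap"
)

type server struct {
	lgMu *sync.RWMutex
	lg   *zap.Logger
}

// getLogger reads the logger under a read lock so callers can race
// safely with any code that replaces it.
func (s *server) getLogger() *zap.Logger {
	s.lgMu.RLock()
	defer s.lgMu.RUnlock()
	return s.lg
}

func main() {
	s := &server{lgMu: new(sync.RWMutex), lg: zap.NewExample()}
	if lg := s.getLogger(); lg != nil {
		lg.Info("logger configured")
	}
}
```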

@ -23,9 +23,12 @@ import (
"path"
"path/filepath"
"reflect"
"sync"
"testing"
"time"
"go.uber.org/zap"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/etcdserver/membership"
"github.com/coreos/etcd/etcdserver/v2store"
@ -89,6 +92,8 @@ func TestDoLocalAction(t *testing.T) {
for i, tt := range tests {
st := mockstore.NewRecorder()
srv := &EtcdServer{
lgMu: new(sync.RWMutex),
lg: zap.NewExample(),
v2store: st,
reqIDGen: idutil.NewGenerator(0, time.Time{}),
}
@ -142,6 +147,8 @@ func TestDoBadLocalAction(t *testing.T) {
for i, tt := range tests {
st := mockstore.NewErrRecorder(storeErr)
srv := &EtcdServer{
lgMu: new(sync.RWMutex),
lg: zap.NewExample(),
v2store: st,
reqIDGen: idutil.NewGenerator(0, time.Time{}),
}
@ -171,12 +178,15 @@ func TestApplyRepeat(t *testing.T) {
cl.SetStore(v2store.New())
cl.AddMember(&membership.Member{ID: 1234})
r := newRaftNode(raftNodeConfig{
lg: zap.NewExample(),
Node: n,
raftStorage: raft.NewMemoryStorage(),
storage: mockstorage.NewStorageRecorder(""),
transport: rafthttp.NewNopTransporter(),
})
s := &EtcdServer{
lgMu: new(sync.RWMutex),
lg: zap.NewExample(),
r: *r,
v2store: st,
cluster: cl,
@ -448,7 +458,11 @@ func TestApplyRequest(t *testing.T) {
for i, tt := range tests {
st := mockstore.NewRecorder()
srv := &EtcdServer{v2store: st}
srv := &EtcdServer{
lgMu: new(sync.RWMutex),
lg: zap.NewExample(),
v2store: st,
}
srv.applyV2 = &applierV2store{store: srv.v2store, cluster: srv.cluster}
resp := srv.applyV2Request((*RequestV2)(&tt.req))
@ -465,6 +479,8 @@ func TestApplyRequest(t *testing.T) {
func TestApplyRequestOnAdminMemberAttributes(t *testing.T) {
cl := newTestCluster([]*membership.Member{{ID: 1}})
srv := &EtcdServer{
lgMu: new(sync.RWMutex),
lg: zap.NewExample(),
v2store: mockstore.NewRecorder(),
cluster: cl,
}
@ -484,7 +500,7 @@ func TestApplyRequestOnAdminMemberAttributes(t *testing.T) {
}
func TestApplyConfChangeError(t *testing.T) {
cl := membership.NewCluster("")
cl := membership.NewCluster(zap.NewExample(), "")
cl.SetStore(v2store.New())
for i := 1; i <= 4; i++ {
cl.AddMember(&membership.Member{ID: types.ID(i)})
@ -527,7 +543,9 @@ func TestApplyConfChangeError(t *testing.T) {
for i, tt := range tests {
n := newNodeRecorder()
srv := &EtcdServer{
r: *newRaftNode(raftNodeConfig{Node: n}),
lgMu: new(sync.RWMutex),
lg: zap.NewExample(),
r: *newRaftNode(raftNodeConfig{lg: zap.NewExample(), Node: n}),
cluster: cl,
}
_, err := srv.applyConfChange(tt.cc, nil)
@ -548,16 +566,19 @@ func TestApplyConfChangeError(t *testing.T) {
}
func TestApplyConfChangeShouldStop(t *testing.T) {
cl := membership.NewCluster("")
cl := membership.NewCluster(zap.NewExample(), "")
cl.SetStore(v2store.New())
for i := 1; i <= 3; i++ {
cl.AddMember(&membership.Member{ID: types.ID(i)})
}
r := newRaftNode(raftNodeConfig{
lg: zap.NewExample(),
Node: newNodeNop(),
transport: rafthttp.NewNopTransporter(),
})
srv := &EtcdServer{
lgMu: new(sync.RWMutex),
lg: zap.NewExample(),
id: 1,
r: *r,
cluster: cl,
@ -589,14 +610,17 @@ func TestApplyConfChangeShouldStop(t *testing.T) {
// TestApplyConfigChangeUpdatesConsistIndex ensures a config change also updates the consistIndex
// where consistIndex equals to applied index.
func TestApplyConfigChangeUpdatesConsistIndex(t *testing.T) {
cl := membership.NewCluster("")
cl := membership.NewCluster(zap.NewExample(), "")
cl.SetStore(v2store.New())
cl.AddMember(&membership.Member{ID: types.ID(1)})
r := newRaftNode(raftNodeConfig{
lg: zap.NewExample(),
Node: newNodeNop(),
transport: rafthttp.NewNopTransporter(),
})
srv := &EtcdServer{
lgMu: new(sync.RWMutex),
lg: zap.NewExample(),
id: 1,
r: *r,
cluster: cl,
@ -632,16 +656,19 @@ func TestApplyConfigChangeUpdatesConsistIndex(t *testing.T) {
// TestApplyMultiConfChangeShouldStop ensures that apply will return shouldStop
// if the local member is removed along with other conf updates.
func TestApplyMultiConfChangeShouldStop(t *testing.T) {
cl := membership.NewCluster("")
cl := membership.NewCluster(zap.NewExample(), "")
cl.SetStore(v2store.New())
for i := 1; i <= 5; i++ {
cl.AddMember(&membership.Member{ID: types.ID(i)})
}
r := newRaftNode(raftNodeConfig{
lg: zap.NewExample(),
Node: newNodeNop(),
transport: rafthttp.NewNopTransporter(),
})
srv := &EtcdServer{
lgMu: new(sync.RWMutex),
lg: zap.NewExample(),
id: 2,
r: *r,
cluster: cl,
@ -677,13 +704,16 @@ func TestDoProposal(t *testing.T) {
for i, tt := range tests {
st := mockstore.NewRecorder()
r := newRaftNode(raftNodeConfig{
lg: zap.NewExample(),
Node: newNodeCommitter(),
storage: mockstorage.NewStorageRecorder(""),
raftStorage: raft.NewMemoryStorage(),
transport: rafthttp.NewNopTransporter(),
})
srv := &EtcdServer{
Cfg: ServerConfig{TickMs: 1},
lgMu: new(sync.RWMutex),
lg: zap.NewExample(),
Cfg: ServerConfig{Logger: zap.NewExample(), TickMs: 1},
r: *r,
v2store: st,
reqIDGen: idutil.NewGenerator(0, time.Time{}),
@ -712,7 +742,9 @@ func TestDoProposal(t *testing.T) {
func TestDoProposalCancelled(t *testing.T) {
wt := mockwait.NewRecorder()
srv := &EtcdServer{
Cfg: ServerConfig{TickMs: 1},
lgMu: new(sync.RWMutex),
lg: zap.NewExample(),
Cfg: ServerConfig{Logger: zap.NewExample(), TickMs: 1},
r: *newRaftNode(raftNodeConfig{Node: newNodeNop()}),
w: wt,
reqIDGen: idutil.NewGenerator(0, time.Time{}),
@ -734,7 +766,9 @@ func TestDoProposalCancelled(t *testing.T) {
func TestDoProposalTimeout(t *testing.T) {
srv := &EtcdServer{
Cfg: ServerConfig{TickMs: 1},
lgMu: new(sync.RWMutex),
lg: zap.NewExample(),
Cfg: ServerConfig{Logger: zap.NewExample(), TickMs: 1},
r: *newRaftNode(raftNodeConfig{Node: newNodeNop()}),
w: mockwait.NewNop(),
reqIDGen: idutil.NewGenerator(0, time.Time{}),
@ -751,8 +785,10 @@ func TestDoProposalTimeout(t *testing.T) {
func TestDoProposalStopped(t *testing.T) {
srv := &EtcdServer{
Cfg: ServerConfig{TickMs: 1},
r: *newRaftNode(raftNodeConfig{Node: newNodeNop()}),
lgMu: new(sync.RWMutex),
lg: zap.NewExample(),
Cfg: ServerConfig{Logger: zap.NewExample(), TickMs: 1},
r: *newRaftNode(raftNodeConfig{lg: zap.NewExample(), Node: newNodeNop()}),
w: mockwait.NewNop(),
reqIDGen: idutil.NewGenerator(0, time.Time{}),
}
@ -771,7 +807,9 @@ func TestSync(t *testing.T) {
n := newNodeRecorder()
ctx, cancel := context.WithCancel(context.TODO())
srv := &EtcdServer{
r: *newRaftNode(raftNodeConfig{Node: n}),
lgMu: new(sync.RWMutex),
lg: zap.NewExample(),
r: *newRaftNode(raftNodeConfig{lg: zap.NewExample(), Node: n}),
reqIDGen: idutil.NewGenerator(0, time.Time{}),
ctx: ctx,
cancel: cancel,
@ -814,7 +852,9 @@ func TestSyncTimeout(t *testing.T) {
n := newProposalBlockerRecorder()
ctx, cancel := context.WithCancel(context.TODO())
srv := &EtcdServer{
r: *newRaftNode(raftNodeConfig{Node: n}),
lgMu: new(sync.RWMutex),
lg: zap.NewExample(),
r: *newRaftNode(raftNodeConfig{lg: zap.NewExample(), Node: n}),
reqIDGen: idutil.NewGenerator(0, time.Time{}),
ctx: ctx,
cancel: cancel,
@ -848,6 +888,7 @@ func TestSyncTrigger(t *testing.T) {
st := make(chan time.Time, 1)
tk := &time.Ticker{C: st}
r := newRaftNode(raftNodeConfig{
lg: zap.NewExample(),
Node: n,
raftStorage: raft.NewMemoryStorage(),
transport: rafthttp.NewNopTransporter(),
@ -855,7 +896,9 @@ func TestSyncTrigger(t *testing.T) {
})
srv := &EtcdServer{
Cfg: ServerConfig{TickMs: 1},
lgMu: new(sync.RWMutex),
lg: zap.NewExample(),
Cfg: ServerConfig{Logger: zap.NewExample(), TickMs: 1},
r: *r,
v2store: mockstore.NewNop(),
SyncTicker: tk,
@ -908,15 +951,18 @@ func TestSnapshot(t *testing.T) {
st := mockstore.NewRecorderStream()
p := mockstorage.NewStorageRecorderStream("")
r := newRaftNode(raftNodeConfig{
lg: zap.NewExample(),
Node: newNodeNop(),
raftStorage: s,
storage: p,
})
srv := &EtcdServer{
lgMu: new(sync.RWMutex),
lg: zap.NewExample(),
r: *r,
v2store: st,
}
srv.kv = mvcc.New(be, &lease.FakeLessor{}, &srv.consistIndex)
srv.kv = mvcc.New(zap.NewExample(), be, &lease.FakeLessor{}, &srv.consistIndex)
srv.be = be
ch := make(chan struct{}, 2)
@ -958,7 +1004,7 @@ func TestSnapshot(t *testing.T) {
func TestSnapshotOrdering(t *testing.T) {
n := newNopReadyNode()
st := v2store.New()
cl := membership.NewCluster("abc")
cl := membership.NewCluster(zap.NewExample(), "abc")
cl.SetStore(st)
testdir, err := ioutil.TempDir(os.TempDir(), "testsnapdir")
@ -976,6 +1022,7 @@ func TestSnapshotOrdering(t *testing.T) {
p := mockstorage.NewStorageRecorderStream(testdir)
tr, snapDoneC := rafthttp.NewSnapTransporter(snapdir)
r := newRaftNode(raftNodeConfig{
lg: zap.NewExample(),
isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) },
Node: n,
transport: tr,
@ -983,10 +1030,12 @@ func TestSnapshotOrdering(t *testing.T) {
raftStorage: rs,
})
s := &EtcdServer{
Cfg: ServerConfig{DataDir: testdir},
lgMu: new(sync.RWMutex),
lg: zap.NewExample(),
Cfg: ServerConfig{Logger: zap.NewExample(), DataDir: testdir},
r: *r,
v2store: st,
snapshotter: raftsnap.New(snapdir),
snapshotter: raftsnap.New(zap.NewExample(), snapdir),
cluster: cl,
SyncTicker: &time.Ticker{},
}
@ -994,7 +1043,7 @@ func TestSnapshotOrdering(t *testing.T) {
be, tmpPath := backend.NewDefaultTmpBackend()
defer os.RemoveAll(tmpPath)
s.kv = mvcc.New(be, &lease.FakeLessor{}, &s.consistIndex)
s.kv = mvcc.New(zap.NewExample(), be, &lease.FakeLessor{}, &s.consistIndex)
s.be = be
s.start()
@ -1038,13 +1087,16 @@ func TestTriggerSnap(t *testing.T) {
st := mockstore.NewRecorder()
p := mockstorage.NewStorageRecorderStream("")
r := newRaftNode(raftNodeConfig{
lg: zap.NewExample(),
Node: newNodeCommitter(),
raftStorage: raft.NewMemoryStorage(),
storage: p,
transport: rafthttp.NewNopTransporter(),
})
srv := &EtcdServer{
Cfg: ServerConfig{TickMs: 1, SnapCount: uint64(snapc)},
lgMu: new(sync.RWMutex),
lg: zap.NewExample(),
Cfg: ServerConfig{Logger: zap.NewExample(), TickMs: 1, SnapCount: uint64(snapc)},
r: *r,
v2store: st,
reqIDGen: idutil.NewGenerator(0, time.Time{}),
@ -1052,7 +1104,7 @@ func TestTriggerSnap(t *testing.T) {
}
srv.applyV2 = &applierV2store{store: srv.v2store, cluster: srv.cluster}
srv.kv = mvcc.New(be, &lease.FakeLessor{}, &srv.consistIndex)
srv.kv = mvcc.New(zap.NewExample(), be, &lease.FakeLessor{}, &srv.consistIndex)
srv.be = be
srv.start()
@ -1086,7 +1138,7 @@ func TestTriggerSnap(t *testing.T) {
func TestConcurrentApplyAndSnapshotV3(t *testing.T) {
n := newNopReadyNode()
st := v2store.New()
cl := membership.NewCluster("abc")
cl := membership.NewCluster(zap.NewExample(), "abc")
cl.SetStore(st)
testdir, err := ioutil.TempDir(os.TempDir(), "testsnapdir")
@ -1101,6 +1153,7 @@ func TestConcurrentApplyAndSnapshotV3(t *testing.T) {
rs := raft.NewMemoryStorage()
tr, snapDoneC := rafthttp.NewSnapTransporter(testdir)
r := newRaftNode(raftNodeConfig{
lg: zap.NewExample(),
isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) },
Node: n,
transport: tr,
@ -1108,10 +1161,12 @@ func TestConcurrentApplyAndSnapshotV3(t *testing.T) {
raftStorage: rs,
})
s := &EtcdServer{
Cfg: ServerConfig{DataDir: testdir},
lgMu: new(sync.RWMutex),
lg: zap.NewExample(),
Cfg: ServerConfig{Logger: zap.NewExample(), DataDir: testdir},
r: *r,
v2store: st,
snapshotter: raftsnap.New(testdir),
snapshotter: raftsnap.New(zap.NewExample(), testdir),
cluster: cl,
SyncTicker: &time.Ticker{},
}
@ -1121,7 +1176,7 @@ func TestConcurrentApplyAndSnapshotV3(t *testing.T) {
defer func() {
os.RemoveAll(tmpPath)
}()
s.kv = mvcc.New(be, &lease.FakeLessor{}, &s.consistIndex)
s.kv = mvcc.New(zap.NewExample(), be, &lease.FakeLessor{}, &s.consistIndex)
s.be = be
s.start()
@ -1186,12 +1241,15 @@ func TestAddMember(t *testing.T) {
st := v2store.New()
cl.SetStore(st)
r := newRaftNode(raftNodeConfig{
lg: zap.NewExample(),
Node: n,
raftStorage: raft.NewMemoryStorage(),
storage: mockstorage.NewStorageRecorder(""),
transport: rafthttp.NewNopTransporter(),
})
s := &EtcdServer{
lgMu: new(sync.RWMutex),
lg: zap.NewExample(),
r: *r,
v2store: st,
cluster: cl,
@ -1227,12 +1285,15 @@ func TestRemoveMember(t *testing.T) {
cl.SetStore(v2store.New())
cl.AddMember(&membership.Member{ID: 1234})
r := newRaftNode(raftNodeConfig{
lg: zap.NewExample(),
Node: n,
raftStorage: raft.NewMemoryStorage(),
storage: mockstorage.NewStorageRecorder(""),
transport: rafthttp.NewNopTransporter(),
})
s := &EtcdServer{
lgMu: new(sync.RWMutex),
lg: zap.NewExample(),
r: *r,
v2store: st,
cluster: cl,
@ -1267,12 +1328,15 @@ func TestUpdateMember(t *testing.T) {
cl.SetStore(st)
cl.AddMember(&membership.Member{ID: 1234})
r := newRaftNode(raftNodeConfig{
lg: zap.NewExample(),
Node: n,
raftStorage: raft.NewMemoryStorage(),
storage: mockstorage.NewStorageRecorder(""),
transport: rafthttp.NewNopTransporter(),
})
s := &EtcdServer{
lgMu: new(sync.RWMutex),
lg: zap.NewExample(),
r: *r,
v2store: st,
cluster: cl,
@ -1307,10 +1371,12 @@ func TestPublish(t *testing.T) {
w := wait.NewWithResponse(ch)
ctx, cancel := context.WithCancel(context.TODO())
srv := &EtcdServer{
lgMu: new(sync.RWMutex),
lg: zap.NewExample(),
readych: make(chan struct{}),
Cfg: ServerConfig{TickMs: 1},
Cfg: ServerConfig{Logger: zap.NewExample(), TickMs: 1},
id: 1,
r: *newRaftNode(raftNodeConfig{Node: n}),
r: *newRaftNode(raftNodeConfig{lg: zap.NewExample(), Node: n}),
attributes: membership.Attributes{Name: "node1", ClientURLs: []string{"http://a", "http://b"}},
cluster: &membership.RaftCluster{},
w: w,
@ -1354,11 +1420,14 @@ func TestPublish(t *testing.T) {
func TestPublishStopped(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
r := newRaftNode(raftNodeConfig{
lg: zap.NewExample(),
Node: newNodeNop(),
transport: rafthttp.NewNopTransporter(),
})
srv := &EtcdServer{
Cfg: ServerConfig{TickMs: 1},
lgMu: new(sync.RWMutex),
lg: zap.NewExample(),
Cfg: ServerConfig{Logger: zap.NewExample(), TickMs: 1},
r: *r,
cluster: &membership.RaftCluster{},
w: mockwait.NewNop(),
@ -1380,8 +1449,10 @@ func TestPublishRetry(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
n := newNodeRecorderStream()
srv := &EtcdServer{
Cfg: ServerConfig{TickMs: 1},
r: *newRaftNode(raftNodeConfig{Node: n}),
lgMu: new(sync.RWMutex),
lg: zap.NewExample(),
Cfg: ServerConfig{Logger: zap.NewExample(), TickMs: 1},
r: *newRaftNode(raftNodeConfig{lg: zap.NewExample(), Node: n}),
w: mockwait.NewNop(),
stopping: make(chan struct{}),
reqIDGen: idutil.NewGenerator(0, time.Time{}),
@ -1420,9 +1491,11 @@ func TestUpdateVersion(t *testing.T) {
w := wait.NewWithResponse(ch)
ctx, cancel := context.WithCancel(context.TODO())
srv := &EtcdServer{
lgMu: new(sync.RWMutex),
lg: zap.NewExample(),
id: 1,
Cfg: ServerConfig{TickMs: 1},
r: *newRaftNode(raftNodeConfig{Node: n}),
Cfg: ServerConfig{Logger: zap.NewExample(), TickMs: 1},
r: *newRaftNode(raftNodeConfig{lg: zap.NewExample(), Node: n}),
attributes: membership.Attributes{Name: "node1", ClientURLs: []string{"http://node1.com"}},
cluster: &membership.RaftCluster{},
w: w,
@ -1459,6 +1532,8 @@ func TestUpdateVersion(t *testing.T) {
func TestStopNotify(t *testing.T) {
s := &EtcdServer{
lgMu: new(sync.RWMutex),
lg: zap.NewExample(),
stop: make(chan struct{}),
done: make(chan struct{}),
}
@ -1510,7 +1585,7 @@ func TestGetOtherPeerURLs(t *testing.T) {
},
}
for i, tt := range tests {
cl := membership.NewClusterFromMembers("", types.ID(0), tt.membs)
cl := membership.NewClusterFromMembers(zap.NewExample(), "", types.ID(0), tt.membs)
self := "1"
urls := getRemotePeerURLs(cl, self)
if !reflect.DeepEqual(urls, tt.wurls) {
@ -1646,7 +1721,7 @@ func (n *nodeCommitter) Propose(ctx context.Context, data []byte) error {
}
func newTestCluster(membs []*membership.Member) *membership.RaftCluster {
c := membership.NewCluster("")
c := membership.NewCluster(zap.NewExample(), "")
for _, m := range membs {
c.AddMember(m)
}
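
Note on the pattern running through these test changes: every component now takes a *zap.Logger (the lgMu/lg fields on EtcdServer, the lg field on raftNodeConfig, and a leading logger argument on membership.NewCluster and raftsnap.New). A minimal, self-contained sketch of the shared logger; zap.NewExample is a real zap constructor that emits deterministic JSON, which keeps test output stable:

package main

import "go.uber.org/zap"

func main() {
	lg := zap.NewExample() // deterministic JSON output, handy in tests
	defer lg.Sync()
	lg.Info("starting test server", zap.String("name", "node1"))
}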

View File

@ -20,6 +20,9 @@ import (
"github.com/coreos/etcd/mvcc/backend"
"github.com/coreos/etcd/raft/raftpb"
"github.com/coreos/etcd/raftsnap"
humanize "github.com/dustin/go-humanize"
"go.uber.org/zap"
)
// createMergedSnapshotMessage creates a snapshot message that contains: raft status (term, conf),
@ -30,14 +33,18 @@ func (s *EtcdServer) createMergedSnapshotMessage(m raftpb.Message, snapt, snapi
clone := s.v2store.Clone()
d, err := clone.SaveNoCopy()
if err != nil {
plog.Panicf("store save should never fail: %v", err)
if lg := s.getLogger(); lg != nil {
lg.Panic("failed to save v2 store data", zap.Error(err))
} else {
plog.Panicf("store save should never fail: %v", err)
}
}
// commit kv to write metadata(for example: consistent index).
s.KV().Commit()
dbsnap := s.be.Snapshot()
// get a snapshot of v3 KV as readCloser
rc := newSnapshotReaderCloser(dbsnap)
rc := newSnapshotReaderCloser(s.getLogger(), dbsnap)
// put the []byte snapshot of store into raft snapshot and return the merged snapshot with
// KV readCloser snapshot.
@ -54,19 +61,39 @@ func (s *EtcdServer) createMergedSnapshotMessage(m raftpb.Message, snapt, snapi
return *raftsnap.NewMessage(m, rc, dbsnap.Size())
}
func newSnapshotReaderCloser(snapshot backend.Snapshot) io.ReadCloser {
func newSnapshotReaderCloser(lg *zap.Logger, snapshot backend.Snapshot) io.ReadCloser {
pr, pw := io.Pipe()
go func() {
n, err := snapshot.WriteTo(pw)
if err == nil {
plog.Infof("wrote database snapshot out [total bytes: %d]", n)
if lg != nil {
lg.Info(
"sent database snapshot to writer",
zap.Int64("bytes", n),
zap.String("size", humanize.Bytes(uint64(n))),
)
} else {
plog.Infof("wrote database snapshot out [total bytes: %d]", n)
}
} else {
plog.Warningf("failed to write database snapshot out [written bytes: %d]: %v", n, err)
if lg != nil {
lg.Warn(
"failed to send database snapshot to writer",
zap.String("size", humanize.Bytes(uint64(n))),
zap.Error(err),
)
} else {
plog.Warningf("failed to write database snapshot out [written bytes: %d]: %v", n, err)
}
}
pw.CloseWithError(err)
err = snapshot.Close()
if err != nil {
plog.Panicf("failed to close database snapshot: %v", err)
if lg != nil {
lg.Panic("failed to close database snapshot", zap.Error(err))
} else {
plog.Panicf("failed to close database snapshot: %v", err)
}
}
}()
return pr
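
newSnapshotReaderCloser streams the backend snapshot through an io.Pipe so the caller consumes an io.ReadCloser while the write happens on a goroutine. A minimal sketch of that pattern, using a strings.Reader (which implements io.WriterTo) in place of the etcd backend snapshot:

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

func streamReader(src io.WriterTo) io.ReadCloser {
	pr, pw := io.Pipe()
	go func() {
		_, err := src.WriteTo(pw)
		pw.CloseWithError(err) // a nil error closes the pipe cleanly (reader sees EOF)
	}()
	return pr
}

func main() {
	rc := streamReader(strings.NewReader("snapshot bytes"))
	b, _ := ioutil.ReadAll(rc)
	fmt.Println(string(b))
}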

View File

@ -24,6 +24,8 @@ import (
"github.com/coreos/etcd/raftsnap"
"github.com/coreos/etcd/wal"
"github.com/coreos/etcd/wal/walpb"
"go.uber.org/zap"
)
type Storage interface {
@ -63,7 +65,7 @@ func (st *storage) SaveSnap(snap raftpb.Snapshot) error {
return st.WAL.ReleaseLockTo(snap.Metadata.Index)
}
func readWAL(waldir string, snap walpb.Snapshot) (w *wal.WAL, id, cid types.ID, st raftpb.HardState, ents []raftpb.Entry) {
func readWAL(lg *zap.Logger, waldir string, snap walpb.Snapshot) (w *wal.WAL, id, cid types.ID, st raftpb.HardState, ents []raftpb.Entry) {
var (
err error
wmetadata []byte
@ -71,19 +73,35 @@ func readWAL(waldir string, snap walpb.Snapshot) (w *wal.WAL, id, cid types.ID,
repaired := false
for {
if w, err = wal.Open(waldir, snap); err != nil {
plog.Fatalf("open wal error: %v", err)
if w, err = wal.Open(lg, waldir, snap); err != nil {
if lg != nil {
lg.Fatal("failed to open WAL", zap.Error(err))
} else {
plog.Fatalf("open wal error: %v", err)
}
}
if wmetadata, st, ents, err = w.ReadAll(); err != nil {
w.Close()
// we can only repair ErrUnexpectedEOF and we never repair twice.
if repaired || err != io.ErrUnexpectedEOF {
plog.Fatalf("read wal error (%v) and cannot be repaired", err)
if lg != nil {
lg.Fatal("failed to read WAL, cannot be repaired", zap.Error(err))
} else {
plog.Fatalf("read wal error (%v) and cannot be repaired", err)
}
}
if !wal.Repair(waldir) {
plog.Fatalf("WAL error (%v) cannot be repaired", err)
if !wal.Repair(lg, waldir) {
if lg != nil {
lg.Fatal("failed to repair WAL", zap.Error(err))
} else {
plog.Fatalf("WAL error (%v) cannot be repaired", err)
}
} else {
plog.Infof("repaired WAL error (%v)", err)
if lg != nil {
lg.Info("repaired WAL", zap.Error(err))
} else {
plog.Infof("repaired WAL error (%v)", err)
}
repaired = true
}
continue
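
The loop above opens the WAL and, on a torn tail (io.ErrUnexpectedEOF), attempts a repair exactly once before retrying the read. A hedged sketch of that control flow, with readAll and repair as hypothetical stand-ins for wal.Open/ReadAll and wal.Repair:

package main

import (
	"fmt"
	"io"
)

var attempts int

// readAll simulates a torn tail on the first read, then succeeds.
func readAll() error {
	attempts++
	if attempts == 1 {
		return io.ErrUnexpectedEOF
	}
	return nil
}

func repair() bool { return true }

func readWithRepair() error {
	repaired := false
	for {
		err := readAll()
		if err == nil {
			return nil
		}
		// only an unexpected EOF is repairable, and we never repair twice
		if repaired || err != io.ErrUnexpectedEOF {
			return err
		}
		if !repair() {
			return err
		}
		repaired = true
	}
}

func main() { fmt.Println(readWithRepair()) }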

View File

@ -21,6 +21,8 @@ import (
"github.com/coreos/etcd/etcdserver/membership"
"github.com/coreos/etcd/pkg/types"
"github.com/coreos/etcd/rafthttp"
"go.uber.org/zap"
)
// isConnectedToQuorumSince checks whether the local member is connected to the
@ -97,18 +99,28 @@ func (nc *notifier) notify(err error) {
close(nc.c)
}
func warnOfExpensiveRequest(now time.Time, stringer fmt.Stringer) {
warnOfExpensiveGenericRequest(now, stringer, "")
func warnOfExpensiveRequest(lg *zap.Logger, now time.Time, stringer fmt.Stringer) {
warnOfExpensiveGenericRequest(lg, now, stringer, "")
}
func warnOfExpensiveReadOnlyRangeRequest(now time.Time, stringer fmt.Stringer) {
warnOfExpensiveGenericRequest(now, stringer, "read-only range ")
func warnOfExpensiveReadOnlyRangeRequest(lg *zap.Logger, now time.Time, stringer fmt.Stringer) {
warnOfExpensiveGenericRequest(lg, now, stringer, "read-only range ")
}
func warnOfExpensiveGenericRequest(now time.Time, stringer fmt.Stringer, prefix string) {
func warnOfExpensiveGenericRequest(lg *zap.Logger, now time.Time, stringer fmt.Stringer, prefix string) {
// TODO: add metrics
d := time.Since(now)
if d > warnApplyDuration {
plog.Warningf("%srequest %q took too long (%v) to execute", prefix, stringer.String(), d)
if lg != nil {
lg.Warn(
"request took too long",
zap.Duration("took", d),
zap.Duration("expected-duration", warnApplyDuration),
zap.String("prefix", prefix),
zap.String("request", stringer.String()),
)
} else {
plog.Warningf("%srequest %q took too long (%v) to execute", prefix, stringer.String(), d)
}
}
}
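
The helper above is meant to be deferred with a time.Now() captured before the work starts. A self-contained sketch of the same check; warnThreshold stands in for warnApplyDuration, whose value is defined elsewhere in the package:

package main

import (
	"fmt"
	"time"

	"go.uber.org/zap"
)

const warnThreshold = 100 * time.Millisecond // stand-in for warnApplyDuration

func warnIfSlow(lg *zap.Logger, start time.Time, req fmt.Stringer, prefix string) {
	if d := time.Since(start); d > warnThreshold {
		lg.Warn(
			"request took too long",
			zap.Duration("took", d),
			zap.String("prefix", prefix),
			zap.String("request", req.String()),
		)
	}
}

type fakeReq struct{}

func (fakeReq) String() string { return `range key:"foo"` }

func main() {
	lg := zap.NewExample()
	start := time.Now()
	time.Sleep(150 * time.Millisecond) // simulate slow work
	warnIfSlow(lg, start, fakeReq{}, "read-only range ")
}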

View File

@ -19,6 +19,8 @@ import (
"testing"
"time"
"go.uber.org/zap"
"github.com/coreos/etcd/etcdserver/membership"
"github.com/coreos/etcd/pkg/types"
"github.com/coreos/etcd/raft/raftpb"
@ -31,7 +33,7 @@ func TestLongestConnected(t *testing.T) {
if err != nil {
t.Fatal(err)
}
clus, err := membership.NewClusterFromURLsMap("test", umap)
clus, err := membership.NewClusterFromURLsMap(zap.NewExample(), "test", umap)
if err != nil {
t.Fatal(err)
}

View File

@ -18,8 +18,11 @@ import (
"bytes"
"context"
"encoding/binary"
"fmt"
"time"
"go.uber.org/zap"
"github.com/coreos/etcd/auth"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/etcdserver/membership"
@ -84,7 +87,7 @@ type Authenticator interface {
}
func (s *EtcdServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) {
defer warnOfExpensiveReadOnlyRangeRequest(time.Now(), r)
defer warnOfExpensiveReadOnlyRangeRequest(s.getLogger(), time.Now(), r)
if !r.Serializable {
err := s.linearizableReadNotify(ctx)
@ -135,7 +138,7 @@ func (s *EtcdServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse
return checkTxnAuth(s.authStore, ai, r)
}
defer warnOfExpensiveReadOnlyRangeRequest(time.Now(), r)
defer warnOfExpensiveReadOnlyRangeRequest(s.getLogger(), time.Now(), r)
get := func() { resp, err = s.applyV3Base.Txn(r) }
if serr := s.doSerialize(ctx, chk, get); serr != nil {
@ -358,12 +361,22 @@ func (s *EtcdServer) Authenticate(ctx context.Context, r *pb.AuthenticateRequest
return nil, err
}
lg := s.getLogger()
var resp proto.Message
for {
checkedRevision, err := s.AuthStore().CheckPassword(r.Name, r.Password)
if err != nil {
if err != auth.ErrAuthNotEnabled {
plog.Errorf("invalid authentication request to user %s was issued", r.Name)
if lg != nil {
lg.Warn(
"invalid authentication was requested",
zap.String("user", r.Name),
zap.Error(err),
)
} else {
plog.Errorf("invalid authentication request to user %s was issued", r.Name)
}
}
return nil, err
}
@ -386,7 +399,12 @@ func (s *EtcdServer) Authenticate(ctx context.Context, r *pb.AuthenticateRequest
if checkedRevision == s.AuthStore().Revision() {
break
}
plog.Infof("revision when password checked is obsolete, retrying")
if lg != nil {
lg.Info("revision when password checked became stale; retrying")
} else {
plog.Infof("revision when password checked is obsolete, retrying")
}
}
return resp.(*pb.AuthenticateResponse), nil
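
Every call site in this change follows the same migration shape: log through zap when a structured logger is configured, otherwise fall back to the legacy plog call. A sketch of that shape in isolation, with log.Printf standing in for plog:

package main

import (
	"log"

	"go.uber.org/zap"
)

func logInvalidAuth(lg *zap.Logger, legacyf func(string, ...interface{}), user string) {
	if lg != nil {
		lg.Warn("invalid authentication was requested", zap.String("user", user))
	} else {
		legacyf("invalid authentication request to user %s was issued", user)
	}
}

func main() {
	logInvalidAuth(zap.NewExample(), log.Printf, "alice") // structured path
	logInvalidAuth(nil, log.Printf, "bob")                // legacy fallback
}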
@ -626,13 +644,18 @@ func (s *EtcdServer) linearizableReadLoop() {
s.readNotifier = nextnr
s.readMu.Unlock()
lg := s.getLogger()
cctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout())
if err := s.r.ReadIndex(cctx, ctx); err != nil {
cancel()
if err == raft.ErrStopped {
return
}
plog.Errorf("failed to get read index from raft: %v", err)
if lg != nil {
lg.Warn("failed to get read index from Raft", zap.Error(err))
} else {
plog.Errorf("failed to get read index from raft: %v", err)
}
nr.notify(err)
continue
}
@ -649,10 +672,22 @@ func (s *EtcdServer) linearizableReadLoop() {
if !done {
// a previous request might time out. now we should ignore the response of it and
// continue waiting for the response of the current requests.
plog.Warningf("ignored out-of-date read index response (want %v, got %v)", rs.RequestCtx, ctx)
if lg != nil {
lg.Warn(
"ignored out-of-date read index response",
zap.String("ctx-expected", fmt.Sprintf("%+v", string(rs.RequestCtx))),
zap.String("ctx-got", fmt.Sprintf("%+v", string(ctx))),
)
} else {
plog.Warningf("ignored out-of-date read index response (want %v, got %v)", rs.RequestCtx, ctx)
}
}
case <-time.After(s.Cfg.ReqTimeout()):
plog.Warningf("timed out waiting for read index response")
if lg != nil {
lg.Warn("timed out waiting for read index response", zap.Duration("timeout", s.Cfg.ReqTimeout()))
} else {
plog.Warningf("timed out waiting for read index response")
}
nr.notify(ErrTimeout)
timeout = true
case <-s.stopping:

View File

@ -1,9 +1,8 @@
agent-configs:
- etcd-exec-path: ./bin/etcd
- etcd-exec: ./bin/etcd
agent-addr: 127.0.0.1:19027
failpoint-http-addr: http://127.0.0.1:7381
base-dir: /tmp/etcd-functional-1
etcd-log-path: /tmp/etcd-functional-1/etcd.log
etcd-client-proxy: false
etcd-peer-proxy: true
etcd-client-endpoint: 127.0.0.1:1379
@ -34,6 +33,9 @@ agent-configs:
quota-backend-bytes: 10740000000 # 10 GiB
pre-vote: true
initial-corrupt-check: true
logger: zap
log-output: /tmp/etcd-functional-1/etcd.log
debug: true
client-cert-data: ""
client-cert-path: ""
client-key-data: ""
@ -48,11 +50,10 @@ agent-configs:
peer-trusted-ca-path: ""
snapshot-path: /tmp/etcd-functional-1.snapshot.db
- etcd-exec-path: ./bin/etcd
- etcd-exec: ./bin/etcd
agent-addr: 127.0.0.1:29027
failpoint-http-addr: http://127.0.0.1:7382
base-dir: /tmp/etcd-functional-2
etcd-log-path: /tmp/etcd-functional-2/etcd.log
etcd-client-proxy: false
etcd-peer-proxy: true
etcd-client-endpoint: 127.0.0.1:2379
@ -83,6 +84,9 @@ agent-configs:
quota-backend-bytes: 10740000000 # 10 GiB
pre-vote: true
initial-corrupt-check: true
logger: zap
log-output: /tmp/etcd-functional-2/etcd.log
debug: true
client-cert-data: ""
client-cert-path: ""
client-key-data: ""
@ -97,11 +101,10 @@ agent-configs:
peer-trusted-ca-path: ""
snapshot-path: /tmp/etcd-functional-2.snapshot.db
- etcd-exec-path: ./bin/etcd
- etcd-exec: ./bin/etcd
agent-addr: 127.0.0.1:39027
failpoint-http-addr: http://127.0.0.1:7383
base-dir: /tmp/etcd-functional-3
etcd-log-path: /tmp/etcd-functional-3/etcd.log
etcd-client-proxy: false
etcd-peer-proxy: true
etcd-client-endpoint: 127.0.0.1:3379
@ -132,6 +135,9 @@ agent-configs:
quota-backend-bytes: 10740000000 # 10 GiB
pre-vote: true
initial-corrupt-check: true
logger: zap
log-output: /tmp/etcd-functional-3/etcd.log
debug: true
client-cert-data: ""
client-cert-path: ""
client-key-data: ""

View File

@ -25,6 +25,7 @@ import (
"syscall"
"time"
"github.com/coreos/etcd/embed"
"github.com/coreos/etcd/functional/rpcpb"
"github.com/coreos/etcd/pkg/fileutil"
"github.com/coreos/etcd/pkg/proxy"
@ -84,50 +85,140 @@ func (srv *Server) handleTesterRequest(req *rpcpb.Request) (resp *rpcpb.Response
}
}
func (srv *Server) handle_INITIAL_START_ETCD(req *rpcpb.Request) (*rpcpb.Response, error) {
if srv.last != rpcpb.Operation_NOT_STARTED {
return &rpcpb.Response{
Success: false,
Status: fmt.Sprintf("%q is not valid; last server operation was %q", rpcpb.Operation_INITIAL_START_ETCD.String(), srv.last.String()),
Member: req.Member,
}, nil
}
err := fileutil.TouchDirAll(srv.Member.BaseDir)
func (srv *Server) createEtcdLogFile() error {
var err error
srv.etcdLogFile, err = os.Create(srv.Member.Etcd.LogOutput)
if err != nil {
return nil, err
return err
}
srv.lg.Info("created base directory", zap.String("path", srv.Member.BaseDir))
srv.lg.Info("created etcd log file", zap.String("path", srv.Member.Etcd.LogOutput))
return nil
}
if err = srv.createEtcdLogFile(); err != nil {
return nil, err
func (srv *Server) creatEtcd(fromSnapshot bool) error {
if !fileutil.Exist(srv.Member.EtcdExec) && srv.Member.EtcdExec != "embed" {
return fmt.Errorf("unknown etcd exec %q or path does not exist", srv.Member.EtcdExec)
}
srv.creatEtcdCmd(false)
if err = srv.saveTLSAssets(); err != nil {
return nil, err
}
if err = srv.startEtcdCmd(); err != nil {
return nil, err
}
srv.lg.Info("started etcd", zap.String("command-path", srv.etcdCmd.Path))
if err = srv.loadAutoTLSAssets(); err != nil {
return nil, err
if srv.Member.EtcdExec != "embed" {
etcdPath, etcdFlags := srv.Member.EtcdExec, srv.Member.Etcd.Flags()
if fromSnapshot {
etcdFlags = srv.Member.EtcdOnSnapshotRestore.Flags()
}
u, _ := url.Parse(srv.Member.FailpointHTTPAddr)
srv.lg.Info(
"creating etcd command",
zap.String("etcd-exec", etcdPath),
zap.Strings("etcd-flags", etcdFlags),
zap.String("failpoint-http-addr", srv.Member.FailpointHTTPAddr),
zap.String("failpoint-addr", u.Host),
)
srv.etcdCmd = exec.Command(etcdPath, etcdFlags...)
srv.etcdCmd.Env = []string{"GOFAIL_HTTP=" + u.Host}
srv.etcdCmd.Stdout = srv.etcdLogFile
srv.etcdCmd.Stderr = srv.etcdLogFile
return nil
}
// wait some time for etcd listener start
// before setting up proxy
time.Sleep(time.Second)
if err = srv.startProxy(); err != nil {
return nil, err
cfg, err := srv.Member.Etcd.EmbedConfig()
if err != nil {
return err
}
return &rpcpb.Response{
Success: true,
Status: "start etcd PASS",
Member: srv.Member,
}, nil
srv.lg.Info("starting embedded etcd", zap.String("name", cfg.Name))
srv.etcdServer, err = embed.StartEtcd(cfg)
if err != nil {
return err
}
srv.lg.Info("started embedded etcd", zap.String("name", cfg.Name))
return nil
}
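
creatEtcd now switches on Member.EtcdExec: the sentinel value "embed" runs etcd in-process through the embed package, and any other value is treated as a binary path to exec. A thin sketch of that dispatch, with startEmbedded and startBinary as hypothetical stand-ins:

package main

import (
	"fmt"
	"os/exec"
)

func startEmbedded() error { fmt.Println("starting in-process etcd"); return nil }

func startBinary(path string, flags []string) error {
	cmd := exec.Command(path, flags...)
	fmt.Println("would exec:", cmd.Path, flags) // cmd.Start() in the real agent
	return nil
}

func launch(etcdExec string, flags []string) error {
	if etcdExec == "embed" {
		return startEmbedded()
	}
	return startBinary(etcdExec, flags)
}

func main() {
	launch("embed", nil)
	launch("./bin/etcd", []string{"--name=s1"})
}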
// start but do not wait for it to complete
func (srv *Server) runEtcd() error {
errc := make(chan error)
go func() {
time.Sleep(5 * time.Second)
// the server's advertised client/peer listeners must start
// before the proxy listeners can be set up
errc <- srv.startProxy()
}()
if srv.etcdCmd != nil {
srv.lg.Info(
"starting etcd command",
zap.String("command-path", srv.etcdCmd.Path),
)
err := srv.etcdCmd.Start()
perr := <-errc
srv.lg.Info(
"started etcd command",
zap.String("command-path", srv.etcdCmd.Path),
zap.Errors("errors", []error{err, perr}),
)
if err != nil {
return err
}
return perr
}
select {
case <-srv.etcdServer.Server.ReadyNotify():
srv.lg.Info("embedded etcd is ready")
case <-time.After(time.Minute):
srv.etcdServer.Close()
return fmt.Errorf("took too long to start %v", <-srv.etcdServer.Err())
}
return <-errc
}
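
The embedded branch above uses the public embed API: StartEtcd returns an embed.Etcd whose Server.ReadyNotify() channel closes once the member has joined the cluster. A minimal standalone use of that API, with an illustrative data directory:

package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/embed"
)

func main() {
	cfg := embed.NewConfig()
	cfg.Dir = "/tmp/embed-example" // illustrative data directory
	e, err := embed.StartEtcd(cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer e.Close()
	select {
	case <-e.Server.ReadyNotify():
		log.Println("embedded etcd is ready")
	case <-time.After(time.Minute):
		log.Fatal("took too long to start")
	}
}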
// SIGQUIT to exit with a stack trace
func (srv *Server) stopEtcd(sig os.Signal) error {
srv.stopProxy()
if srv.etcdCmd != nil {
srv.lg.Info(
"stopping etcd command",
zap.String("command-path", srv.etcdCmd.Path),
zap.String("signal", sig.String()),
)
err := srv.etcdCmd.Process.Signal(sig)
if err != nil {
return err
}
errc := make(chan error)
go func() {
_, ew := srv.etcdCmd.Process.Wait()
errc <- ew
close(errc)
}()
select {
case <-time.After(5 * time.Second):
srv.etcdCmd.Process.Kill()
case e := <-errc:
return e
}
err = <-errc
srv.lg.Info(
"stopped etcd command",
zap.String("command-path", srv.etcdCmd.Path),
zap.String("signal", sig.String()),
zap.Error(err),
)
return err
}
srv.lg.Info("stopping embedded etcd")
srv.etcdServer.Server.HardStop()
srv.etcdServer.Close()
srv.lg.Info("stopped embedded etcd")
return nil
}
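
For the subprocess branch, stopEtcd sends the signal, then waits up to five seconds before force-killing, the same logic the removed stopWithSig helper used. A generic sketch over any *exec.Cmd:

package main

import (
	"os"
	"os/exec"
	"time"
)

func stopWithGrace(cmd *exec.Cmd, sig os.Signal) error {
	if err := cmd.Process.Signal(sig); err != nil {
		return err
	}
	errc := make(chan error, 1)
	go func() {
		_, err := cmd.Process.Wait()
		errc <- err
	}()
	select {
	case err := <-errc:
		return err // exited within the grace period
	case <-time.After(5 * time.Second):
		cmd.Process.Kill() // force-kill, then collect the exit status
		return <-errc
	}
}

func main() {
	cmd := exec.Command("sleep", "60")
	if err := cmd.Start(); err != nil {
		return
	}
	stopWithGrace(cmd, os.Interrupt)
}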
func (srv *Server) startProxy() error {
@ -141,6 +232,7 @@ func (srv *Server) startProxy() error {
return err
}
srv.lg.Info("starting proxy on client traffic", zap.String("url", advertiseClientURL.String()))
srv.advertiseClientPortToProxy[advertiseClientURLPort] = proxy.NewServer(proxy.ServerConfig{
Logger: srv.lg,
From: *advertiseClientURL,
@ -164,6 +256,7 @@ func (srv *Server) startProxy() error {
return err
}
srv.lg.Info("starting proxy on peer traffic", zap.String("url", advertisePeerURL.String()))
srv.advertisePeerPortToProxy[advertisePeerURLPort] = proxy.NewServer(proxy.ServerConfig{
Logger: srv.lg,
From: *advertisePeerURL,
@ -222,34 +315,6 @@ func (srv *Server) stopProxy() {
}
}
func (srv *Server) createEtcdLogFile() error {
var err error
srv.etcdLogFile, err = os.Create(srv.Member.EtcdLogPath)
if err != nil {
return err
}
srv.lg.Info("created etcd log file", zap.String("path", srv.Member.EtcdLogPath))
return nil
}
func (srv *Server) creatEtcdCmd(fromSnapshot bool) {
etcdPath, etcdFlags := srv.Member.EtcdExecPath, srv.Member.Etcd.Flags()
if fromSnapshot {
etcdFlags = srv.Member.EtcdOnSnapshotRestore.Flags()
}
u, _ := url.Parse(srv.Member.FailpointHTTPAddr)
srv.lg.Info("creating etcd command",
zap.String("etcd-exec-path", etcdPath),
zap.Strings("etcd-flags", etcdFlags),
zap.String("failpoint-http-addr", srv.Member.FailpointHTTPAddr),
zap.String("failpoint-addr", u.Host),
)
srv.etcdCmd = exec.Command(etcdPath, etcdFlags...)
srv.etcdCmd.Env = []string{"GOFAIL_HTTP=" + u.Host}
srv.etcdCmd.Stdout = srv.etcdLogFile
srv.etcdCmd.Stderr = srv.etcdLogFile
}
// if started with manual TLS, stores TLS assets
// from tester/client to disk before starting etcd process
func (srv *Server) saveTLSAssets() error {
@ -322,7 +387,6 @@ func (srv *Server) saveTLSAssets() error {
zap.String("client-trusted-ca", srv.Member.ClientTrustedCAPath),
)
}
return nil
}
@ -412,9 +476,45 @@ func (srv *Server) loadAutoTLSAssets() error {
return nil
}
// start but do not wait for it to complete
func (srv *Server) startEtcdCmd() error {
return srv.etcdCmd.Start()
func (srv *Server) handle_INITIAL_START_ETCD(req *rpcpb.Request) (*rpcpb.Response, error) {
if srv.last != rpcpb.Operation_NOT_STARTED {
return &rpcpb.Response{
Success: false,
Status: fmt.Sprintf("%q is not valid; last server operation was %q", rpcpb.Operation_INITIAL_START_ETCD.String(), srv.last.String()),
Member: req.Member,
}, nil
}
err := fileutil.TouchDirAll(srv.Member.BaseDir)
if err != nil {
return nil, err
}
srv.lg.Info("created base directory", zap.String("path", srv.Member.BaseDir))
if srv.etcdServer == nil {
if err = srv.createEtcdLogFile(); err != nil {
return nil, err
}
}
if err = srv.saveTLSAssets(); err != nil {
return nil, err
}
if err = srv.creatEtcd(false); err != nil {
return nil, err
}
if err = srv.runEtcd(); err != nil {
return nil, err
}
if err = srv.loadAutoTLSAssets(); err != nil {
return nil, err
}
return &rpcpb.Response{
Success: true,
Status: "start etcd PASS",
Member: srv.Member,
}, nil
}
func (srv *Server) handle_RESTART_ETCD() (*rpcpb.Response, error) {
@ -426,28 +526,19 @@ func (srv *Server) handle_RESTART_ETCD() (*rpcpb.Response, error) {
}
}
srv.creatEtcdCmd(false)
if err = srv.saveTLSAssets(); err != nil {
return nil, err
}
if err = srv.startEtcdCmd(); err != nil {
if err = srv.creatEtcd(false); err != nil {
return nil, err
}
if err = srv.runEtcd(); err != nil {
return nil, err
}
srv.lg.Info("restarted etcd", zap.String("command-path", srv.etcdCmd.Path))
if err = srv.loadAutoTLSAssets(); err != nil {
return nil, err
}
// wait some time for etcd listener start
// before setting up proxy
// TODO: local tests should handle port conflicts
// with clients on restart
time.Sleep(time.Second)
if err = srv.startProxy(); err != nil {
return nil, err
}
return &rpcpb.Response{
Success: true,
Status: "restart etcd PASS",
@ -456,13 +547,15 @@ func (srv *Server) handle_RESTART_ETCD() (*rpcpb.Response, error) {
}
func (srv *Server) handle_SIGTERM_ETCD() (*rpcpb.Response, error) {
srv.stopProxy()
err := stopWithSig(srv.etcdCmd, syscall.SIGTERM)
if err != nil {
if err := srv.stopEtcd(syscall.SIGTERM); err != nil {
return nil, err
}
srv.lg.Info("killed etcd", zap.String("signal", syscall.SIGTERM.String()))
if srv.etcdServer != nil {
srv.etcdServer.GetLogger().Sync()
} else {
srv.etcdLogFile.Sync()
}
return &rpcpb.Response{
Success: true,
@ -471,16 +564,17 @@ func (srv *Server) handle_SIGTERM_ETCD() (*rpcpb.Response, error) {
}
func (srv *Server) handle_SIGQUIT_ETCD_AND_REMOVE_DATA() (*rpcpb.Response, error) {
srv.stopProxy()
err := stopWithSig(srv.etcdCmd, syscall.SIGQUIT)
err := srv.stopEtcd(syscall.SIGQUIT)
if err != nil {
return nil, err
}
srv.lg.Info("killed etcd", zap.String("signal", syscall.SIGQUIT.String()))
srv.etcdLogFile.Sync()
srv.etcdLogFile.Close()
if srv.etcdServer != nil {
srv.etcdServer.GetLogger().Sync()
} else {
srv.etcdLogFile.Sync()
srv.etcdLogFile.Close()
}
// for debugging purposes, rename instead of removing
if err = os.RemoveAll(srv.Member.BaseDir + ".backup"); err != nil {
@ -502,9 +596,6 @@ func (srv *Server) handle_SIGQUIT_ETCD_AND_REMOVE_DATA() (*rpcpb.Response, error
return nil, err
}
}
if err = srv.createEtcdLogFile(); err != nil {
return nil, err
}
return &rpcpb.Response{
Success: true,
@ -537,28 +628,19 @@ func (srv *Server) handle_RESTORE_RESTART_FROM_SNAPSHOT() (resp *rpcpb.Response,
}
func (srv *Server) handle_RESTART_FROM_SNAPSHOT() (resp *rpcpb.Response, err error) {
srv.creatEtcdCmd(true)
if err = srv.saveTLSAssets(); err != nil {
return nil, err
}
if err = srv.startEtcdCmd(); err != nil {
if err = srv.creatEtcd(true); err != nil {
return nil, err
}
if err = srv.runEtcd(); err != nil {
return nil, err
}
srv.lg.Info("restarted etcd", zap.String("command-path", srv.etcdCmd.Path))
if err = srv.loadAutoTLSAssets(); err != nil {
return nil, err
}
// wait some time for etcd listener start
// before setting up proxy
// TODO: local tests should handle port conflicts
// with clients on restart
time.Sleep(time.Second)
if err = srv.startProxy(); err != nil {
return nil, err
}
return &rpcpb.Response{
Success: true,
Status: "restarted etcd from snapshot",
@ -567,30 +649,32 @@ func (srv *Server) handle_RESTART_FROM_SNAPSHOT() (resp *rpcpb.Response, err err
}
func (srv *Server) handle_SIGQUIT_ETCD_AND_ARCHIVE_DATA() (*rpcpb.Response, error) {
srv.stopProxy()
// exit with a stack trace
err := stopWithSig(srv.etcdCmd, syscall.SIGQUIT)
err := srv.stopEtcd(syscall.SIGQUIT)
if err != nil {
return nil, err
}
srv.lg.Info("killed etcd", zap.String("signal", syscall.SIGQUIT.String()))
srv.etcdLogFile.Sync()
srv.etcdLogFile.Close()
if srv.etcdServer != nil {
srv.etcdServer.GetLogger().Sync()
} else {
srv.etcdLogFile.Sync()
srv.etcdLogFile.Close()
}
// TODO: support separate WAL directory
if err = archive(
srv.Member.BaseDir,
srv.Member.EtcdLogPath,
srv.Member.Etcd.LogOutput,
srv.Member.Etcd.DataDir,
); err != nil {
return nil, err
}
srv.lg.Info("archived data", zap.String("base-dir", srv.Member.BaseDir))
if err = srv.createEtcdLogFile(); err != nil {
return nil, err
if srv.etcdServer == nil {
if err = srv.createEtcdLogFile(); err != nil {
return nil, err
}
}
srv.lg.Info("cleaning up page cache")
@ -607,16 +691,17 @@ func (srv *Server) handle_SIGQUIT_ETCD_AND_ARCHIVE_DATA() (*rpcpb.Response, erro
// stop proxy, etcd, delete data directory
func (srv *Server) handle_SIGQUIT_ETCD_AND_REMOVE_DATA_AND_STOP_AGENT() (*rpcpb.Response, error) {
srv.stopProxy()
err := stopWithSig(srv.etcdCmd, syscall.SIGQUIT)
err := srv.stopEtcd(syscall.SIGQUIT)
if err != nil {
return nil, err
}
srv.lg.Info("killed etcd", zap.String("signal", syscall.SIGQUIT.String()))
srv.etcdLogFile.Sync()
srv.etcdLogFile.Close()
if srv.etcdServer != nil {
srv.etcdServer.GetLogger().Sync()
} else {
srv.etcdLogFile.Sync()
srv.etcdLogFile.Close()
}
err = os.RemoveAll(srv.Member.BaseDir)
if err != nil {

View File

@ -21,6 +21,7 @@ import (
"os/exec"
"strings"
"github.com/coreos/etcd/embed"
"github.com/coreos/etcd/functional/rpcpb"
"github.com/coreos/etcd/pkg/proxy"
@ -33,8 +34,9 @@ import (
// no need to lock fields since request operations are
// serialized on the tester side
type Server struct {
lg *zap.Logger
grpcServer *grpc.Server
lg *zap.Logger
network string
address string
@ -46,6 +48,7 @@ type Server struct {
*rpcpb.Member
*rpcpb.Tester
etcdServer *embed.Etcd
etcdCmd *exec.Cmd
etcdLogFile *os.File

View File

@ -79,29 +79,6 @@ func getURLAndPort(addr string) (urlAddr *url.URL, port int, err error) {
return urlAddr, port, err
}
func stopWithSig(cmd *exec.Cmd, sig os.Signal) error {
err := cmd.Process.Signal(sig)
if err != nil {
return err
}
errc := make(chan error)
go func() {
_, ew := cmd.Process.Wait()
errc <- ew
close(errc)
}()
select {
case <-time.After(5 * time.Second):
cmd.Process.Kill()
case e := <-errc:
return e
}
err = <-errc
return err
}
func cleanPageCache() error {
// https://www.kernel.org/doc/Documentation/sysctl/vm.txt
// https://github.com/torvalds/linux/blob/master/fs/drop_caches.c

View File

@ -18,6 +18,10 @@ import (
"fmt"
"reflect"
"strings"
"github.com/coreos/etcd/embed"
"github.com/coreos/etcd/pkg/transport"
"github.com/coreos/etcd/pkg/types"
)
var etcdFields = []string{
@ -53,12 +57,16 @@ var etcdFields = []string{
"PreVote",
"InitialCorruptCheck",
"Logger",
"LogOutput",
"Debug",
}
// Flags returns etcd flags in string slice.
func (cfg *Etcd) Flags() (fs []string) {
tp := reflect.TypeOf(*cfg)
vo := reflect.ValueOf(*cfg)
func (e *Etcd) Flags() (fs []string) {
tp := reflect.TypeOf(*e)
vo := reflect.ValueOf(*e)
for _, name := range etcdFields {
field, ok := tp.FieldByName(name)
if !ok {
@ -97,3 +105,70 @@ func (cfg *Etcd) Flags() (fs []string) {
}
return fs
}
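
Flags() walks a fixed field list with reflection and renders each field as a --yaml-tag=value flag. A simplified, runnable sketch of that walk (tag handling here is reduced relative to the real implementation):

package main

import (
	"fmt"
	"reflect"
	"strings"
)

type conf struct {
	Name    string `yaml:"name"`
	PreVote bool   `yaml:"pre-vote"`
}

func flags(c conf) (fs []string) {
	tp, vo := reflect.TypeOf(c), reflect.ValueOf(c)
	for i := 0; i < tp.NumField(); i++ {
		// take the yaml tag name, dropping any ",omitempty"-style options
		tag := strings.Split(tp.Field(i).Tag.Get("yaml"), ",")[0]
		fs = append(fs, fmt.Sprintf("--%s=%v", tag, vo.Field(i).Interface()))
	}
	return fs
}

func main() {
	fmt.Println(flags(conf{Name: "s1", PreVote: true}))
	// [--name=s1 --pre-vote=true]
}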
// EmbedConfig returns etcd embed.Config.
func (e *Etcd) EmbedConfig() (cfg *embed.Config, err error) {
var lcURLs types.URLs
lcURLs, err = types.NewURLs(e.ListenClientURLs)
if err != nil {
return nil, err
}
var acURLs types.URLs
acURLs, err = types.NewURLs(e.AdvertiseClientURLs)
if err != nil {
return nil, err
}
var lpURLs types.URLs
lpURLs, err = types.NewURLs(e.ListenPeerURLs)
if err != nil {
return nil, err
}
var apURLs types.URLs
apURLs, err = types.NewURLs(e.AdvertisePeerURLs)
if err != nil {
return nil, err
}
cfg = embed.NewConfig()
cfg.Name = e.Name
cfg.Dir = e.DataDir
cfg.WalDir = e.WALDir
cfg.TickMs = uint(e.HeartbeatIntervalMs)
cfg.ElectionMs = uint(e.ElectionTimeoutMs)
cfg.LCUrls = lcURLs
cfg.ACUrls = acURLs
cfg.ClientAutoTLS = e.ClientAutoTLS
cfg.ClientTLSInfo = transport.TLSInfo{
ClientCertAuth: e.ClientCertAuth,
CertFile: e.ClientCertFile,
KeyFile: e.ClientKeyFile,
TrustedCAFile: e.ClientTrustedCAFile,
}
cfg.LPUrls = lpURLs
cfg.APUrls = apURLs
cfg.PeerAutoTLS = e.PeerAutoTLS
cfg.PeerTLSInfo = transport.TLSInfo{
ClientCertAuth: e.PeerClientCertAuth,
CertFile: e.PeerCertFile,
KeyFile: e.PeerKeyFile,
TrustedCAFile: e.PeerTrustedCAFile,
}
cfg.InitialCluster = e.InitialCluster
cfg.ClusterState = e.InitialClusterState
cfg.InitialClusterToken = e.InitialClusterToken
cfg.SnapCount = uint64(e.SnapshotCount)
cfg.QuotaBackendBytes = e.QuotaBackendBytes
cfg.PreVote = e.PreVote
cfg.ExperimentalInitialCorruptCheck = e.InitialCorruptCheck
cfg.Logger = e.Logger
cfg.LogOutput = e.LogOutput
cfg.Debug = e.Debug
return cfg, nil
}
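
Typical use of EmbedConfig in the agent: build the flat tester config, convert, then hand the result to embed.StartEtcd (as creatEtcd does above). A sketch with illustrative values; only the four URL fields must parse for the conversion to succeed:

package main

import (
	"log"

	"github.com/coreos/etcd/functional/rpcpb"
)

func main() {
	e := &rpcpb.Etcd{
		Name:                "s1",
		DataDir:             "/tmp/embed-config-example",
		ListenClientURLs:    []string{"http://127.0.0.1:1379"},
		AdvertiseClientURLs: []string{"http://127.0.0.1:1379"},
		ListenPeerURLs:      []string{"http://127.0.0.1:1380"},
		AdvertisePeerURLs:   []string{"http://127.0.0.1:1380"},
		InitialCluster:      "s1=http://127.0.0.1:1380",
		InitialClusterState: "new",
	}
	cfg, err := e.EmbedConfig()
	if err != nil {
		log.Fatal(err)
	}
	log.Println("embed config ready:", cfg.Name) // pass cfg to embed.StartEtcd
}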

View File

@ -17,13 +17,16 @@ package rpcpb
import (
"reflect"
"testing"
"github.com/coreos/etcd/embed"
"github.com/coreos/etcd/pkg/types"
)
func TestEtcdFlags(t *testing.T) {
cfg := &Etcd{
func TestEtcd(t *testing.T) {
e := &Etcd{
Name: "s1",
DataDir: "/tmp/etcd-agent-data-1/etcd.data",
WALDir: "/tmp/etcd-agent-data-1/etcd.data/member/wal",
DataDir: "/tmp/etcd-functional-1/etcd.data",
WALDir: "/tmp/etcd-functional-1/etcd.data/member/wal",
HeartbeatIntervalMs: 100,
ElectionTimeoutMs: 1000,
@ -53,12 +56,16 @@ func TestEtcdFlags(t *testing.T) {
PreVote: true,
InitialCorruptCheck: true,
Logger: "zap",
LogOutput: "/tmp/etcd-functional-1/etcd.log",
Debug: true,
}
exp := []string{
exps := []string{
"--name=s1",
"--data-dir=/tmp/etcd-agent-data-1/etcd.data",
"--wal-dir=/tmp/etcd-agent-data-1/etcd.data/member/wal",
"--data-dir=/tmp/etcd-functionl-1/etcd.data",
"--wal-dir=/tmp/etcd-functionl-1/etcd.data/member/wal",
"--heartbeat-interval=100",
"--election-timeout=1000",
"--listen-client-urls=https://127.0.0.1:1379",
@ -76,9 +83,63 @@ func TestEtcdFlags(t *testing.T) {
"--quota-backend-bytes=10740000000",
"--pre-vote=true",
"--experimental-initial-corrupt-check=true",
"--logger=zap",
"--log-output=/tmp/etcd-functional-1/etcd.log",
"--debug=true",
}
fs := cfg.Flags()
if !reflect.DeepEqual(exp, fs) {
t.Fatalf("expected %q, got %q", exp, fs)
fs := e.Flags()
if !reflect.DeepEqual(exps, fs) {
t.Fatalf("expected %q, got %q", exps, fs)
}
var err error
var lcURLs types.URLs
lcURLs, err = types.NewURLs([]string{"https://127.0.0.1:1379"})
if err != nil {
t.Fatal(err)
}
var acURLs types.URLs
acURLs, err = types.NewURLs([]string{"https://127.0.0.1:13790"})
if err != nil {
t.Fatal(err)
}
var lpURLs types.URLs
lpURLs, err = types.NewURLs([]string{"https://127.0.0.1:1380"})
if err != nil {
t.Fatal(err)
}
var apURLs types.URLs
apURLs, err = types.NewURLs([]string{"https://127.0.0.1:13800"})
if err != nil {
t.Fatal(err)
}
expc := embed.NewConfig()
expc.Name = "s1"
expc.Dir = "/tmp/etcd-functional-1/etcd.data"
expc.WalDir = "/tmp/etcd-functional-1/etcd.data/member/wal"
expc.TickMs = 100
expc.ElectionMs = 1000
expc.LCUrls = lcURLs
expc.ACUrls = acURLs
expc.ClientAutoTLS = true
expc.LPUrls = lpURLs
expc.APUrls = apURLs
expc.PeerAutoTLS = true
expc.InitialCluster = "s1=https://127.0.0.1:13800,s2=https://127.0.0.1:23800,s3=https://127.0.0.1:33800"
expc.ClusterState = "new"
expc.InitialClusterToken = "tkn"
expc.SnapCount = 10000
expc.QuotaBackendBytes = 10740000000
expc.PreVote = true
expc.ExperimentalInitialCorruptCheck = true
expc.Logger = "zap"
expc.LogOutput = "/tmp/etcd-functional-1/etcd.log"
expc.Debug = true
cfg, err := e.EmbedConfig()
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(expc, cfg) {
t.Fatalf("expected %+v, got %+v", expc, cfg)
}
}

View File

@ -613,16 +613,14 @@ func (*Response) ProtoMessage() {}
func (*Response) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{2} }
type Member struct {
// EtcdExecPath is the executable etcd binary path in agent server.
EtcdExecPath string `protobuf:"bytes,1,opt,name=EtcdExecPath,proto3" json:"EtcdExecPath,omitempty" yaml:"etcd-exec-path"`
// EtcdExec is the executable etcd binary path on the agent server.
EtcdExec string `protobuf:"bytes,1,opt,name=EtcdExec,proto3" json:"EtcdExec,omitempty" yaml:"etcd-exec"`
// AgentAddr is the agent HTTP server address.
AgentAddr string `protobuf:"bytes,11,opt,name=AgentAddr,proto3" json:"AgentAddr,omitempty" yaml:"agent-addr"`
// FailpointHTTPAddr is the agent's failpoints HTTP server address.
FailpointHTTPAddr string `protobuf:"bytes,12,opt,name=FailpointHTTPAddr,proto3" json:"FailpointHTTPAddr,omitempty" yaml:"failpoint-http-addr"`
// BaseDir is the base directory where all logs and etcd data are stored.
BaseDir string `protobuf:"bytes,101,opt,name=BaseDir,proto3" json:"BaseDir,omitempty" yaml:"base-dir"`
// EtcdLogPath is the log file to store current etcd server logs.
EtcdLogPath string `protobuf:"bytes,102,opt,name=EtcdLogPath,proto3" json:"EtcdLogPath,omitempty" yaml:"etcd-log-path"`
// EtcdClientProxy is true when client traffic needs to be proxied.
// If true, listen client URL port must be different than advertise client URL port.
EtcdClientProxy bool `protobuf:"varint,201,opt,name=EtcdClientProxy,proto3" json:"EtcdClientProxy,omitempty" yaml:"etcd-client-proxy"`
@ -761,6 +759,10 @@ type Etcd struct {
QuotaBackendBytes int64 `protobuf:"varint,52,opt,name=QuotaBackendBytes,proto3" json:"QuotaBackendBytes,omitempty" yaml:"quota-backend-bytes"`
PreVote bool `protobuf:"varint,63,opt,name=PreVote,proto3" json:"PreVote,omitempty" yaml:"pre-vote"`
InitialCorruptCheck bool `protobuf:"varint,64,opt,name=InitialCorruptCheck,proto3" json:"InitialCorruptCheck,omitempty" yaml:"initial-corrupt-check"`
Logger string `protobuf:"bytes,71,opt,name=Logger,proto3" json:"Logger,omitempty" yaml:"logger"`
// LogOutput is the log file to store current etcd server logs.
LogOutput string `protobuf:"bytes,72,opt,name=LogOutput,proto3" json:"LogOutput,omitempty" yaml:"log-output"`
Debug bool `protobuf:"varint,73,opt,name=Debug,proto3" json:"Debug,omitempty" yaml:"debug"`
}
func (m *Etcd) Reset() { *m = Etcd{} }
@ -1075,11 +1077,11 @@ func (m *Member) MarshalTo(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
if len(m.EtcdExecPath) > 0 {
if len(m.EtcdExec) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintRpc(dAtA, i, uint64(len(m.EtcdExecPath)))
i += copy(dAtA[i:], m.EtcdExecPath)
i = encodeVarintRpc(dAtA, i, uint64(len(m.EtcdExec)))
i += copy(dAtA[i:], m.EtcdExec)
}
if len(m.AgentAddr) > 0 {
dAtA[i] = 0x5a
@ -1101,14 +1103,6 @@ func (m *Member) MarshalTo(dAtA []byte) (int, error) {
i = encodeVarintRpc(dAtA, i, uint64(len(m.BaseDir)))
i += copy(dAtA[i:], m.BaseDir)
}
if len(m.EtcdLogPath) > 0 {
dAtA[i] = 0xb2
i++
dAtA[i] = 0x6
i++
i = encodeVarintRpc(dAtA, i, uint64(len(m.EtcdLogPath)))
i += copy(dAtA[i:], m.EtcdLogPath)
}
if m.EtcdClientProxy {
dAtA[i] = 0xc8
i++
@ -1787,6 +1781,34 @@ func (m *Etcd) MarshalTo(dAtA []byte) (int, error) {
}
i++
}
if len(m.Logger) > 0 {
dAtA[i] = 0xba
i++
dAtA[i] = 0x4
i++
i = encodeVarintRpc(dAtA, i, uint64(len(m.Logger)))
i += copy(dAtA[i:], m.Logger)
}
if len(m.LogOutput) > 0 {
dAtA[i] = 0xc2
i++
dAtA[i] = 0x4
i++
i = encodeVarintRpc(dAtA, i, uint64(len(m.LogOutput)))
i += copy(dAtA[i:], m.LogOutput)
}
if m.Debug {
dAtA[i] = 0xc8
i++
dAtA[i] = 0x4
i++
if m.Debug {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
i++
}
return i, nil
}
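
The literal bytes in the new marshaling code fall out of the protobuf wire format: each field key is varint(field_number<<3 | wire_type). Logger is field 71 with wire type 2 (length-delimited), so the key is 570, which varint-encodes to 0xba, 0x04, exactly the two bytes written above; LogOutput (field 72) gives 0xc2, 0x04, and the bool Debug (field 73, wire type 0) gives 0xc8, 0x04. A one-file check:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	for _, f := range []struct {
		num  uint64
		wire uint64
	}{
		{71, 2}, // Logger, length-delimited
		{72, 2}, // LogOutput, length-delimited
		{73, 0}, // Debug, varint
	} {
		buf := make([]byte, binary.MaxVarintLen64)
		n := binary.PutUvarint(buf, f.num<<3|f.wire)
		fmt.Printf("field %d: % x\n", f.num, buf[:n])
	}
	// field 71: ba 04
	// field 72: c2 04
	// field 73: c8 04
}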
@ -1881,7 +1903,7 @@ func (m *Response) Size() (n int) {
func (m *Member) Size() (n int) {
var l int
_ = l
l = len(m.EtcdExecPath)
l = len(m.EtcdExec)
if l > 0 {
n += 1 + l + sovRpc(uint64(l))
}
@ -1897,10 +1919,6 @@ func (m *Member) Size() (n int) {
if l > 0 {
n += 2 + l + sovRpc(uint64(l))
}
l = len(m.EtcdLogPath)
if l > 0 {
n += 2 + l + sovRpc(uint64(l))
}
if m.EtcdClientProxy {
n += 3
}
@ -2178,6 +2196,17 @@ func (m *Etcd) Size() (n int) {
if m.InitialCorruptCheck {
n += 3
}
l = len(m.Logger)
if l > 0 {
n += 2 + l + sovRpc(uint64(l))
}
l = len(m.LogOutput)
if l > 0 {
n += 2 + l + sovRpc(uint64(l))
}
if m.Debug {
n += 3
}
return n
}
@ -2806,7 +2835,7 @@ func (m *Member) Unmarshal(dAtA []byte) error {
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field EtcdExecPath", wireType)
return fmt.Errorf("proto: wrong wireType = %d for field EtcdExec", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@ -2831,7 +2860,7 @@ func (m *Member) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.EtcdExecPath = string(dAtA[iNdEx:postIndex])
m.EtcdExec = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 11:
if wireType != 2 {
@ -2920,35 +2949,6 @@ func (m *Member) Unmarshal(dAtA []byte) error {
}
m.BaseDir = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 102:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field EtcdLogPath", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRpc
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthRpc
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.EtcdLogPath = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 201:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field EtcdClientProxy", wireType)
@ -4803,6 +4803,84 @@ func (m *Etcd) Unmarshal(dAtA []byte) error {
}
}
m.InitialCorruptCheck = bool(v != 0)
case 71:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Logger", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRpc
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthRpc
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Logger = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 72:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field LogOutput", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRpc
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthRpc
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.LogOutput = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 73:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Debug", wireType)
}
var v int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRpc
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
m.Debug = bool(v != 0)
default:
iNdEx = preIndex
skippy, err := skipRpc(dAtA[iNdEx:])
@ -4932,181 +5010,184 @@ var (
func init() { proto.RegisterFile("rpcpb/rpc.proto", fileDescriptorRpc) }
var fileDescriptorRpc = []byte{
// 2808 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x59, 0xdb, 0x73, 0xdb, 0xc6,
0xf5, 0x16, 0x44, 0x5d, 0x57, 0x37, 0x68, 0x65, 0xd9, 0xf0, 0x4d, 0x90, 0xe1, 0x38, 0x3f, 0x59,
0x09, 0xec, 0xfc, 0xec, 0x4c, 0x2e, 0x4e, 0x13, 0x07, 0xa4, 0x20, 0x8b, 0x15, 0x44, 0xd2, 0x4b,
0xc8, 0x76, 0x9e, 0x38, 0x10, 0xb9, 0x92, 0x30, 0xa6, 0x00, 0x06, 0x58, 0x2a, 0x52, 0xfe, 0x81,
0xbe, 0xf6, 0x3e, 0xed, 0x4c, 0x9f, 0xfa, 0xdc, 0xb4, 0xff, 0x86, 0x73, 0x6b, 0xd3, 0xf6, 0xa9,
0xed, 0x0c, 0xa7, 0x4d, 0x5f, 0xfa, 0xd4, 0x07, 0x4e, 0x6f, 0xe9, 0x53, 0x67, 0x77, 0x01, 0x71,
0x01, 0x90, 0x92, 0x9e, 0xa4, 0x3d, 0xe7, 0xfb, 0xbe, 0x3d, 0xbb, 0x67, 0xb1, 0xe7, 0x00, 0x04,
0x73, 0x41, 0xab, 0xde, 0xda, 0xb9, 0x1b, 0xb4, 0xea, 0x77, 0x5a, 0x81, 0x4f, 0x7c, 0x38, 0xca,
0x0c, 0x57, 0xf4, 0x3d, 0x97, 0xec, 0xb7, 0x77, 0xee, 0xd4, 0xfd, 0x83, 0xbb, 0x7b, 0xfe, 0x9e,
0x7f, 0x97, 0x79, 0x77, 0xda, 0xbb, 0x6c, 0xc4, 0x06, 0xec, 0x3f, 0xce, 0xd2, 0xbe, 0x23, 0x81,
0x71, 0x84, 0x3f, 0x6c, 0xe3, 0x90, 0xc0, 0x3b, 0x60, 0xb2, 0xdc, 0xc2, 0x81, 0x43, 0x5c, 0xdf,
0x53, 0xa4, 0x65, 0x69, 0x65, 0xf6, 0x9e, 0x7c, 0x87, 0xa9, 0xde, 0x39, 0xb1, 0xa3, 0x1e, 0x04,
0xde, 0x02, 0x63, 0x5b, 0xf8, 0x60, 0x07, 0x07, 0xca, 0xf0, 0xb2, 0xb4, 0x32, 0x75, 0x6f, 0x26,
0x02, 0x73, 0x23, 0x8a, 0x9c, 0x14, 0x66, 0xe3, 0x90, 0xe0, 0x40, 0xc9, 0x25, 0x60, 0xdc, 0x88,
0x22, 0xa7, 0xf6, 0xb7, 0x61, 0x30, 0x5d, 0xf5, 0x9c, 0x56, 0xb8, 0xef, 0x93, 0xa2, 0xb7, 0xeb,
0xc3, 0x25, 0x00, 0xb8, 0x42, 0xc9, 0x39, 0xc0, 0x2c, 0x9e, 0x49, 0x24, 0x58, 0xe0, 0x2a, 0x90,
0xf9, 0xa8, 0xd0, 0x74, 0xb1, 0x47, 0xb6, 0x91, 0x15, 0x2a, 0xc3, 0xcb, 0xb9, 0x95, 0x49, 0x94,
0xb1, 0x43, 0xad, 0xa7, 0x5d, 0x71, 0xc8, 0x3e, 0x8b, 0x64, 0x12, 0x25, 0x6c, 0x54, 0x2f, 0x1e,
0xaf, 0xbb, 0x4d, 0x5c, 0x75, 0x3f, 0xc6, 0xca, 0x08, 0xc3, 0x65, 0xec, 0xf0, 0x55, 0x30, 0x1f,
0xdb, 0x6c, 0x9f, 0x38, 0x4d, 0x06, 0x1e, 0x65, 0xe0, 0xac, 0x43, 0x54, 0x66, 0xc6, 0x4d, 0x7c,
0xac, 0x8c, 0x2d, 0x4b, 0x2b, 0x39, 0x94, 0xb1, 0x8b, 0x91, 0x6e, 0x38, 0xe1, 0xbe, 0x32, 0xce,
0x70, 0x09, 0x9b, 0xa8, 0x87, 0xf0, 0xa1, 0x1b, 0xd2, 0x7c, 0x4d, 0x24, 0xf5, 0x62, 0x3b, 0x84,
0x60, 0xc4, 0xf6, 0xfd, 0xe7, 0xca, 0x24, 0x0b, 0x8e, 0xfd, 0xaf, 0xfd, 0x4c, 0x02, 0x13, 0x08,
0x87, 0x2d, 0xdf, 0x0b, 0x31, 0x54, 0xc0, 0x78, 0xb5, 0x5d, 0xaf, 0xe3, 0x30, 0x64, 0x7b, 0x3c,
0x81, 0xe2, 0x21, 0xbc, 0x08, 0xc6, 0xaa, 0xc4, 0x21, 0xed, 0x90, 0xe5, 0x77, 0x12, 0x45, 0x23,
0x21, 0xef, 0xb9, 0xd3, 0xf2, 0xfe, 0x66, 0x32, 0x9f, 0x6c, 0x2f, 0xa7, 0xee, 0x2d, 0x44, 0x60,
0xd1, 0x85, 0x12, 0x40, 0xed, 0x4f, 0xd3, 0xf1, 0x04, 0xf0, 0x5d, 0x30, 0x6d, 0x92, 0x7a, 0xc3,
0x3c, 0xc2, 0x75, 0x96, 0x37, 0x76, 0x0a, 0xf2, 0x97, 0xbb, 0x1d, 0x75, 0xf1, 0xd8, 0x39, 0x68,
0x3e, 0xd0, 0x30, 0xa9, 0x37, 0x74, 0x7c, 0x84, 0xeb, 0x7a, 0xcb, 0x21, 0xfb, 0x1a, 0x4a, 0xc0,
0xe1, 0x7d, 0x30, 0x69, 0xec, 0x61, 0x8f, 0x18, 0x8d, 0x46, 0xa0, 0x4c, 0x31, 0xee, 0x62, 0xb7,
0xa3, 0xce, 0x73, 0xae, 0x43, 0x5d, 0xba, 0xd3, 0x68, 0x04, 0x1a, 0xea, 0xe1, 0xa0, 0x05, 0xe6,
0xd7, 0x1d, 0xb7, 0xd9, 0xf2, 0x5d, 0x8f, 0x6c, 0xd8, 0x76, 0x85, 0x91, 0xa7, 0x19, 0x79, 0xa9,
0xdb, 0x51, 0xaf, 0x70, 0xf2, 0x6e, 0x0c, 0xd1, 0xf7, 0x09, 0x69, 0x45, 0x2a, 0x59, 0x22, 0xd4,
0xc1, 0x78, 0xde, 0x09, 0xf1, 0x9a, 0x1b, 0x28, 0x98, 0x69, 0x2c, 0x74, 0x3b, 0xea, 0x1c, 0xd7,
0xd8, 0x71, 0x42, 0xac, 0x37, 0xdc, 0x40, 0x43, 0x31, 0x06, 0x3e, 0x00, 0x53, 0x74, 0x05, 0x96,
0xbf, 0xc7, 0xd6, 0xbb, 0xcb, 0x28, 0x4a, 0xb7, 0xa3, 0x5e, 0x10, 0xd6, 0xdb, 0xf4, 0xf7, 0xa2,
0xe5, 0x8a, 0x60, 0xf8, 0x08, 0xcc, 0xd1, 0x21, 0x3f, 0xf6, 0x95, 0xc0, 0x3f, 0x3a, 0x56, 0x3e,
0x65, 0x29, 0xcd, 0x5f, 0xeb, 0x76, 0x54, 0x45, 0x10, 0xa8, 0x33, 0x88, 0xde, 0xa2, 0x18, 0x0d,
0xa5, 0x59, 0xd0, 0x00, 0x33, 0xd4, 0x54, 0xc1, 0x38, 0xe0, 0x32, 0x9f, 0x71, 0x99, 0x2b, 0xdd,
0x8e, 0x7a, 0x51, 0x90, 0x69, 0x61, 0x1c, 0xc4, 0x22, 0x49, 0x06, 0xac, 0x00, 0xd8, 0x53, 0x35,
0xbd, 0x06, 0xdb, 0x14, 0xe5, 0x13, 0x76, 0x90, 0xf2, 0x6a, 0xb7, 0xa3, 0x5e, 0xcd, 0x86, 0x83,
0x23, 0x98, 0x86, 0xfa, 0x70, 0xe1, 0xff, 0x83, 0x11, 0x6a, 0x55, 0x7e, 0xc9, 0x2f, 0x9b, 0xa9,
0xe8, 0x1c, 0x51, 0x5b, 0x7e, 0xae, 0xdb, 0x51, 0xa7, 0x7a, 0x82, 0x1a, 0x62, 0x50, 0x98, 0x07,
0x8b, 0xf4, 0x6f, 0xd9, 0xeb, 0x3d, 0x15, 0x21, 0xf1, 0x03, 0xac, 0xfc, 0x2a, 0xab, 0x81, 0xfa,
0x43, 0xe1, 0x1a, 0x98, 0xe5, 0x81, 0x14, 0x70, 0x40, 0xd6, 0x1c, 0xe2, 0x28, 0xdf, 0x63, 0x97,
0x47, 0xfe, 0x6a, 0xb7, 0xa3, 0x5e, 0xe2, 0x73, 0x46, 0xf1, 0xd7, 0x71, 0x40, 0xf4, 0x86, 0x43,
0x1c, 0x0d, 0xa5, 0x38, 0x49, 0x15, 0x96, 0xd9, 0xef, 0x9f, 0xaa, 0xc2, 0xb3, 0x9b, 0xe2, 0xd0,
0xbc, 0x70, 0xcb, 0x26, 0x3e, 0x66, 0xa1, 0xfc, 0x80, 0x8b, 0x08, 0x79, 0x89, 0x44, 0x9e, 0xe3,
0xe3, 0x28, 0x92, 0x24, 0x23, 0x21, 0xc1, 0xe2, 0xf8, 0xe1, 0x69, 0x12, 0x3c, 0x8c, 0x24, 0x03,
0xda, 0x60, 0x81, 0x1b, 0xec, 0xa0, 0x1d, 0x12, 0xdc, 0x28, 0x18, 0x2c, 0x96, 0x1f, 0x71, 0xa1,
0x1b, 0xdd, 0x8e, 0x7a, 0x3d, 0x21, 0x44, 0x38, 0x4c, 0xaf, 0x3b, 0x51, 0x48, 0xfd, 0xe8, 0x7d,
0x54, 0x59, 0x78, 0x3f, 0x3e, 0x87, 0x2a, 0x8f, 0xb2, 0x1f, 0x1d, 0xbe, 0x07, 0xa6, 0xe9, 0x99,
0x3c, 0xc9, 0xdd, 0x3f, 0x73, 0xe9, 0x0b, 0x84, 0x9d, 0x61, 0x21, 0x73, 0x09, 0xbc, 0xc8, 0x67,
0xe1, 0xfc, 0xeb, 0x14, 0x7e, 0x74, 0x01, 0x89, 0x78, 0xf8, 0x0e, 0x98, 0xa2, 0xe3, 0x38, 0x5f,
0xff, 0xce, 0xa5, 0x9f, 0x67, 0x46, 0xef, 0x65, 0x4b, 0x44, 0x0b, 0x64, 0x36, 0xf7, 0x7f, 0x06,
0x93, 0xa3, 0xcb, 0x40, 0x40, 0xc3, 0x12, 0x98, 0xa7, 0xc3, 0x64, 0x8e, 0xbe, 0xc9, 0xa5, 0x9f,
0x3f, 0x26, 0x91, 0xc9, 0x50, 0x96, 0x9a, 0xd1, 0x63, 0x21, 0xfd, 0xf7, 0x4c, 0x3d, 0x1e, 0x59,
0x96, 0x4a, 0x6f, 0xf6, 0x44, 0x45, 0xfe, 0xc3, 0x48, 0x7a, 0x75, 0x61, 0xe4, 0x8e, 0x37, 0x36,
0x51, 0xac, 0xdf, 0x4a, 0x15, 0x97, 0x3f, 0x9e, 0xbb, 0xba, 0xfc, 0x7c, 0x3a, 0xee, 0x47, 0xe8,
0xdd, 0x4c, 0xd7, 0x46, 0xef, 0x66, 0x29, 0x7d, 0x37, 0xd3, 0x8d, 0x88, 0xee, 0xe6, 0x08, 0x03,
0x5f, 0x05, 0xe3, 0x25, 0x4c, 0x3e, 0xf2, 0x83, 0xe7, 0xbc, 0x20, 0xe6, 0x61, 0xb7, 0xa3, 0xce,
0x72, 0xb8, 0xc7, 0x1d, 0x1a, 0x8a, 0x21, 0xf0, 0x26, 0x18, 0x61, 0x95, 0x83, 0x6f, 0x91, 0x70,
0x43, 0xf1, 0x52, 0xc1, 0x9c, 0xb0, 0x00, 0x66, 0xd7, 0x70, 0xd3, 0x39, 0xb6, 0x1c, 0x82, 0xbd,
0xfa, 0xf1, 0x56, 0xc8, 0xaa, 0xd4, 0x8c, 0x78, 0x2d, 0x34, 0xa8, 0x5f, 0x6f, 0x72, 0x80, 0x7e,
0x10, 0x6a, 0x28, 0x45, 0x81, 0xdf, 0x06, 0x72, 0xd2, 0x82, 0x0e, 0x59, 0xbd, 0x9a, 0x11, 0xeb,
0x55, 0x5a, 0x46, 0x0f, 0x0e, 0x35, 0x94, 0xe1, 0xc1, 0x0f, 0xc0, 0xe2, 0x76, 0xab, 0xe1, 0x10,
0xdc, 0x48, 0xc5, 0x35, 0xc3, 0x04, 0x6f, 0x76, 0x3b, 0xaa, 0xca, 0x05, 0xdb, 0x1c, 0xa6, 0x67,
0xe3, 0xeb, 0xaf, 0x00, 0xdf, 0x00, 0x00, 0xf9, 0x6d, 0xaf, 0x61, 0xb9, 0x07, 0x2e, 0x51, 0x16,
0x97, 0xa5, 0x95, 0xd1, 0xfc, 0xc5, 0x6e, 0x47, 0x85, 0x5c, 0x2f, 0xa0, 0x3e, 0xbd, 0x49, 0x9d,
0x1a, 0x12, 0x90, 0x30, 0x0f, 0x66, 0xcd, 0x23, 0x97, 0x94, 0xbd, 0x82, 0x13, 0x62, 0x5a, 0x60,
0x95, 0x8b, 0x99, 0x6a, 0x74, 0xe4, 0x12, 0xdd, 0xf7, 0x74, 0x5a, 0x94, 0xdb, 0x01, 0xd6, 0x50,
0x8a, 0x01, 0xdf, 0x06, 0x53, 0xa6, 0xe7, 0xec, 0x34, 0x71, 0xa5, 0x15, 0xf8, 0xbb, 0xca, 0x25,
0x26, 0x70, 0xa9, 0xdb, 0x51, 0x17, 0x22, 0x01, 0xe6, 0xd4, 0x5b, 0xd4, 0x4b, 0xab, 0x6a, 0x0f,
0x4b, 0x2b, 0x32, 0x95, 0x61, 0x8b, 0xd9, 0x0a, 0x15, 0x95, 0xed, 0x83, 0x70, 0x4c, 0xeb, 0xac,
0x88, 0xb3, 0x4d, 0xa0, 0x8b, 0x17, 0xc1, 0x74, 0x5a, 0x3a, 0xac, 0xee, 0xb7, 0x77, 0x77, 0x9b,
0x58, 0x59, 0x4e, 0x4f, 0xcb, 0xb8, 0x21, 0xf7, 0x46, 0xd4, 0x08, 0x0b, 0x5f, 0x06, 0xa3, 0x74,
0x18, 0x2a, 0x37, 0x68, 0x4b, 0x9b, 0x97, 0xbb, 0x1d, 0x75, 0xba, 0x47, 0x0a, 0x35, 0xc4, 0xdd,
0x70, 0x53, 0xe8, 0x56, 0x0a, 0xfe, 0xc1, 0x81, 0xe3, 0x35, 0x42, 0x45, 0x63, 0x9c, 0xeb, 0xdd,
0x8e, 0x7a, 0x39, 0xdd, 0xad, 0xd4, 0x23, 0x8c, 0xd8, 0xac, 0xc4, 0x3c, 0x7a, 0x1c, 0x51, 0xdb,
0xf3, 0x70, 0x70, 0xd2, 0x70, 0xdd, 0x4e, 0x57, 0xa9, 0x80, 0xf9, 0xc5, 0x96, 0x2b, 0x45, 0x81,
0x45, 0x20, 0x9b, 0x47, 0x04, 0x07, 0x9e, 0xd3, 0x3c, 0x91, 0x59, 0x65, 0x32, 0x42, 0x40, 0x38,
0x42, 0x88, 0x42, 0x19, 0x1a, 0xbc, 0x07, 0x26, 0xab, 0x24, 0xc0, 0x61, 0x88, 0x83, 0x50, 0xc1,
0x6c, 0x51, 0x17, 0xba, 0x1d, 0x55, 0x8e, 0x2e, 0x88, 0xd8, 0xa5, 0xa1, 0x1e, 0x0c, 0xde, 0x05,
0x13, 0x85, 0x7d, 0x5c, 0x7f, 0x4e, 0x29, 0xbb, 0x8c, 0x22, 0x3c, 0xd5, 0xf5, 0xc8, 0xa3, 0xa1,
0x13, 0x10, 0x2d, 0x89, 0x9c, 0xbd, 0x89, 0x8f, 0x59, 0x1f, 0xcf, 0x9a, 0xa6, 0x51, 0xf1, 0x7c,
0xf1, 0x99, 0xd8, 0x55, 0x1b, 0xba, 0x1f, 0x63, 0x0d, 0x25, 0x19, 0xf0, 0x31, 0x80, 0x09, 0x83,
0xe5, 0x04, 0x7b, 0x98, 0x77, 0x4d, 0xa3, 0xf9, 0xe5, 0x6e, 0x47, 0xbd, 0xd6, 0x57, 0x47, 0x6f,
0x52, 0x9c, 0x86, 0xfa, 0x90, 0xe1, 0x53, 0x70, 0xa1, 0x67, 0x6d, 0xef, 0xee, 0xba, 0x47, 0xc8,
0xf1, 0xf6, 0xb0, 0xf2, 0x39, 0x17, 0xd5, 0xba, 0x1d, 0x75, 0x29, 0x2b, 0xca, 0x80, 0x7a, 0x40,
0x91, 0x1a, 0xea, 0x2b, 0x00, 0x1d, 0x70, 0xa9, 0x9f, 0xdd, 0x3e, 0xf2, 0x94, 0x2f, 0xb8, 0xf6,
0xcb, 0xdd, 0x8e, 0xaa, 0x9d, 0xaa, 0xad, 0x93, 0x23, 0x4f, 0x43, 0x83, 0x74, 0xe0, 0x06, 0x98,
0x3b, 0x71, 0xd9, 0x47, 0x5e, 0xb9, 0x15, 0x2a, 0x5f, 0x72, 0x69, 0xe1, 0x04, 0x08, 0xd2, 0xe4,
0xc8, 0xd3, 0xfd, 0x56, 0xa8, 0xa1, 0x34, 0x0d, 0xbe, 0x1f, 0xe7, 0x86, 0x17, 0xf7, 0x90, 0x77,
0x90, 0xa3, 0x62, 0x01, 0x8e, 0x74, 0x78, 0x5b, 0x10, 0x9e, 0xa4, 0x26, 0x22, 0xc0, 0xd7, 0xe3,
0x23, 0xf4, 0xb8, 0x52, 0xe5, 0xbd, 0xe3, 0xa8, 0xf8, 0x0e, 0x10, 0xb1, 0x3f, 0x6c, 0xf5, 0x0e,
0xd1, 0xe3, 0x4a, 0x55, 0xfb, 0x66, 0x86, 0x77, 0x9b, 0xf4, 0x16, 0xef, 0xbd, 0x7e, 0x8a, 0xb7,
0xb8, 0xe7, 0x1c, 0x60, 0x0d, 0x31, 0xa7, 0x58, 0x47, 0x86, 0xcf, 0x51, 0x47, 0x56, 0xc1, 0xd8,
0x53, 0xc3, 0xa2, 0xe8, 0x5c, 0xba, 0x8c, 0x7c, 0xe4, 0x34, 0x39, 0x38, 0x42, 0xc0, 0x32, 0x58,
0xd8, 0xc0, 0x4e, 0x40, 0x76, 0xb0, 0x43, 0x8a, 0x1e, 0xc1, 0xc1, 0xa1, 0xd3, 0x8c, 0xaa, 0x44,
0x4e, 0xdc, 0xcd, 0xfd, 0x18, 0xa4, 0xbb, 0x11, 0x4a, 0x43, 0xfd, 0x98, 0xb0, 0x08, 0xe6, 0xcd,
0x26, 0xae, 0xd3, 0x17, 0x78, 0xdb, 0x3d, 0xc0, 0x7e, 0x9b, 0x6c, 0x85, 0xac, 0x5a, 0xe4, 0xc4,
0xa7, 0x1c, 0x47, 0x10, 0x9d, 0x70, 0x8c, 0x86, 0xb2, 0x2c, 0xfa, 0xa0, 0x5b, 0x6e, 0x48, 0xb0,
0x27, 0xbc, 0x80, 0x2f, 0xa6, 0x6f, 0x9e, 0x26, 0x43, 0xc4, 0x2d, 0x7e, 0x3b, 0x68, 0x86, 0x1a,
0xca, 0xd0, 0x20, 0x02, 0x0b, 0x46, 0xe3, 0x10, 0x07, 0xc4, 0x0d, 0xb1, 0xa0, 0x76, 0x91, 0xa9,
0x09, 0x0f, 0x90, 0x13, 0x83, 0x92, 0x82, 0xfd, 0xc8, 0xf0, 0xed, 0xb8, 0xd5, 0x35, 0xda, 0xc4,
0xb7, 0xad, 0x6a, 0x74, 0xeb, 0x0b, 0xb9, 0x71, 0xda, 0xc4, 0xd7, 0x09, 0x15, 0x48, 0x22, 0xe9,
0x3d, 0xd8, 0x6b, 0xbd, 0x8d, 0x36, 0xd9, 0x57, 0x14, 0xc6, 0x1d, 0xd0, 0xad, 0x3b, 0xed, 0x54,
0xb7, 0x4e, 0x29, 0xf0, 0x5b, 0xa2, 0xc8, 0xba, 0xdb, 0xc4, 0xca, 0x65, 0x96, 0x6e, 0xe1, 0x06,
0x63, 0xec, 0x5d, 0x97, 0x5e, 0xfe, 0x29, 0x6c, 0x2f, 0xfa, 0x4d, 0x7c, 0xcc, 0xc8, 0x57, 0xd2,
0x27, 0x8b, 0x3e, 0x39, 0x9c, 0x9b, 0x44, 0x42, 0x2b, 0xd3, 0x4a, 0x33, 0x81, 0xab, 0xe9, 0x46,
0x5f, 0x68, 0xd3, 0xb8, 0x4e, 0x3f, 0x1a, 0xdd, 0x0b, 0x9e, 0x2e, 0xda, 0xc3, 0xb1, 0xac, 0xa8,
0x2c, 0x2b, 0xc2, 0x5e, 0x44, 0x39, 0x66, 0xbd, 0x1f, 0x4f, 0x48, 0x8a, 0x02, 0x6d, 0x30, 0x7f,
0x92, 0xa2, 0x13, 0x9d, 0x65, 0xa6, 0x23, 0xdc, 0x36, 0xae, 0xe7, 0x12, 0xd7, 0x69, 0xea, 0xbd,
0x2c, 0x0b, 0x92, 0x59, 0x01, 0x5a, 0x9a, 0xe9, 0xff, 0x71, 0x7e, 0x6f, 0xb0, 0x1c, 0xa5, 0xfb,
0xe3, 0x5e, 0x92, 0x45, 0x30, 0x7d, 0x41, 0x65, 0x9d, 0x7a, 0x32, 0xcd, 0x1a, 0x93, 0x10, 0x0e,
0x1c, 0x6f, 0xef, 0x33, 0xb9, 0xee, 0xc3, 0xa5, 0x1d, 0x6d, 0xdc, 0xfb, 0xb3, 0xfd, 0xbe, 0x39,
0xf8, 0x55, 0x81, 0x6f, 0x77, 0x02, 0x1e, 0x2f, 0x26, 0x4e, 0xf7, 0x4b, 0x03, 0x9b, 0x7d, 0x4e,
0x16, 0xc1, 0x70, 0x2b, 0xd5, 0x9c, 0x33, 0x85, 0x5b, 0x67, 0xf5, 0xe6, 0x5c, 0x28, 0xcb, 0xa4,
0x1d, 0x57, 0x91, 0xa7, 0xa2, 0xd0, 0x6c, 0xb3, 0x2f, 0x77, 0xb7, 0xd3, 0x67, 0x27, 0x4e, 0x55,
0x9d, 0x03, 0x34, 0x94, 0x62, 0xd0, 0x27, 0x3a, 0x69, 0xa9, 0x12, 0x87, 0xe0, 0xa8, 0x11, 0x10,
0x36, 0x38, 0x25, 0xa4, 0x87, 0x14, 0xa6, 0xa1, 0x7e, 0xe4, 0xac, 0xa6, 0xed, 0x3f, 0xc7, 0x9e,
0xf2, 0xca, 0x59, 0x9a, 0x84, 0xc2, 0x32, 0x9a, 0x8c, 0x0c, 0x1f, 0x82, 0x99, 0xf8, 0xf5, 0xa0,
0xe0, 0xb7, 0x3d, 0xa2, 0xdc, 0x67, 0x77, 0xa1, 0x58, 0x60, 0xe2, 0xf7, 0x90, 0x3a, 0xf5, 0xd3,
0x02, 0x23, 0xe2, 0xa1, 0x05, 0xe6, 0x1f, 0xb7, 0x7d, 0xe2, 0xe4, 0x9d, 0xfa, 0x73, 0xec, 0x35,
0xf2, 0xc7, 0x04, 0x87, 0xca, 0xeb, 0x4c, 0x44, 0x68, 0xbf, 0x3f, 0xa4, 0x10, 0x7d, 0x87, 0x63,
0xf4, 0x1d, 0x0a, 0xd2, 0x50, 0x96, 0x48, 0x4b, 0x49, 0x25, 0xc0, 0x4f, 0x7c, 0x82, 0x95, 0x87,
0xe9, 0xeb, 0xaa, 0x15, 0x60, 0xfd, 0xd0, 0xa7, 0xbb, 0x13, 0x63, 0xc4, 0x1d, 0xf1, 0x83, 0xa0,
0xdd, 0x22, 0xac, 0xab, 0x51, 0xde, 0x4f, 0x1f, 0xe3, 0x93, 0x1d, 0xe1, 0x28, 0x9d, 0xf5, 0x41,
0xc2, 0x8e, 0x08, 0xe4, 0xd5, 0x9f, 0xe6, 0x84, 0xef, 0xc0, 0x70, 0x0e, 0x4c, 0x95, 0xca, 0x76,
0xad, 0x6a, 0x1b, 0xc8, 0x36, 0xd7, 0xe4, 0x21, 0x78, 0x11, 0xc0, 0x62, 0xa9, 0x68, 0x17, 0x0d,
0x8b, 0x1b, 0x6b, 0xa6, 0x5d, 0x58, 0x93, 0x01, 0x94, 0xc1, 0x34, 0x32, 0x05, 0xcb, 0x14, 0xb5,
0x54, 0x8b, 0x8f, 0x6c, 0x13, 0x6d, 0x71, 0xcb, 0x05, 0xb8, 0x0c, 0xae, 0x55, 0x8b, 0x8f, 0x1e,
0x6f, 0x17, 0x39, 0xa6, 0x66, 0x94, 0xd6, 0x6a, 0xc8, 0xdc, 0x2a, 0x3f, 0x31, 0x6b, 0x6b, 0x86,
0x6d, 0xc8, 0x8b, 0x70, 0x1e, 0xcc, 0x54, 0x8d, 0x27, 0x66, 0xad, 0x5a, 0x32, 0x2a, 0xd5, 0x8d,
0xb2, 0x2d, 0x2f, 0xc1, 0x1b, 0xe0, 0x3a, 0x15, 0x2e, 0x23, 0xb3, 0x16, 0x4f, 0xb0, 0x8e, 0xca,
0x5b, 0x3d, 0x88, 0x0a, 0x2f, 0x83, 0xc5, 0xfe, 0xae, 0x65, 0xca, 0xce, 0x4c, 0x69, 0xa0, 0xc2,
0x46, 0x31, 0x9e, 0x73, 0x05, 0xde, 0x05, 0xaf, 0x9c, 0x16, 0x15, 0x1b, 0x57, 0xed, 0x72, 0xa5,
0x66, 0x3c, 0x32, 0x4b, 0xb6, 0x7c, 0x1b, 0x5e, 0x07, 0x97, 0xf3, 0x96, 0x51, 0xd8, 0xdc, 0x28,
0x5b, 0x66, 0xad, 0x62, 0x9a, 0xa8, 0x56, 0x29, 0x23, 0xbb, 0x66, 0x3f, 0xab, 0xa1, 0x67, 0x72,
0x03, 0xaa, 0xe0, 0xea, 0x76, 0x69, 0x30, 0x00, 0xc3, 0x2b, 0x60, 0x71, 0xcd, 0xb4, 0x8c, 0x0f,
0x32, 0xae, 0x17, 0x12, 0xbc, 0x06, 0x2e, 0x6d, 0x97, 0xfa, 0x7b, 0x3f, 0x95, 0x56, 0xff, 0x0e,
0xc0, 0x08, 0xed, 0xfb, 0xa1, 0x02, 0x2e, 0xc4, 0x7b, 0x5b, 0x2e, 0x99, 0xb5, 0xf5, 0xb2, 0x65,
0x95, 0x9f, 0x9a, 0x48, 0x1e, 0x8a, 0x56, 0x93, 0xf1, 0xd4, 0xb6, 0x4b, 0x76, 0xd1, 0xaa, 0xd9,
0xa8, 0xf8, 0xe8, 0x91, 0x89, 0x7a, 0x3b, 0x24, 0x41, 0x08, 0x66, 0x63, 0x82, 0x65, 0x1a, 0x6b,
0x26, 0x92, 0x87, 0xe1, 0x6d, 0x70, 0x2b, 0x69, 0x1b, 0x44, 0xcf, 0x89, 0xf4, 0xc7, 0xdb, 0x65,
0xb4, 0xbd, 0x25, 0x8f, 0xd0, 0x43, 0x13, 0xdb, 0x0c, 0xcb, 0x92, 0x47, 0xe1, 0x4d, 0xa0, 0xc6,
0x5b, 0x2c, 0xec, 0x6e, 0x22, 0x72, 0x00, 0x1f, 0x80, 0x37, 0xce, 0x00, 0x0d, 0x8a, 0x62, 0x8a,
0xa6, 0xa4, 0x0f, 0x37, 0x5a, 0xcf, 0x34, 0x7c, 0x1d, 0xbc, 0x36, 0xd0, 0x3d, 0x48, 0x74, 0x06,
0xae, 0x83, 0x7c, 0x1f, 0x16, 0x5f, 0x65, 0x64, 0xe1, 0xe7, 0x32, 0x12, 0x8a, 0xa9, 0xd1, 0x21,
0x2c, 0x20, 0xc3, 0x2e, 0x6c, 0xc8, 0xb3, 0x70, 0x15, 0xbc, 0x3c, 0xf0, 0x38, 0x24, 0x37, 0xa1,
0x01, 0x0d, 0xf0, 0xee, 0xf9, 0xb0, 0x83, 0xc2, 0xc6, 0xf0, 0x25, 0xb0, 0x3c, 0x58, 0x22, 0xda,
0x92, 0x5d, 0xf8, 0x0e, 0x78, 0xf3, 0x2c, 0xd4, 0xa0, 0x29, 0xf6, 0x4e, 0x9f, 0x22, 0x3a, 0x06,
0xfb, 0xf4, 0xd9, 0x1b, 0x8c, 0xa2, 0x07, 0xc3, 0x85, 0xff, 0x07, 0xb4, 0xbe, 0x87, 0x3d, 0xb9,
0x2d, 0x2f, 0x24, 0x78, 0x07, 0xdc, 0x46, 0x46, 0x69, 0xad, 0xbc, 0x55, 0x3b, 0x07, 0xfe, 0x53,
0x09, 0xbe, 0x07, 0xde, 0x3e, 0x1b, 0x38, 0x68, 0x81, 0x9f, 0x49, 0xd0, 0x04, 0xef, 0x9f, 0x7b,
0xbe, 0x41, 0x32, 0x9f, 0x4b, 0xf0, 0x06, 0xb8, 0xd6, 0x9f, 0x1f, 0xe5, 0xe1, 0x0b, 0x09, 0xae,
0x80, 0x9b, 0xa7, 0xce, 0x14, 0x21, 0xbf, 0x94, 0xe0, 0x5b, 0xe0, 0xfe, 0x69, 0x90, 0x41, 0x61,
0xfc, 0x5a, 0x82, 0x0f, 0xc1, 0x83, 0x73, 0xcc, 0x31, 0x48, 0xe0, 0x37, 0xa7, 0xac, 0x23, 0x4a,
0xf6, 0x57, 0x67, 0xaf, 0x23, 0x42, 0xfe, 0x56, 0x82, 0x4b, 0xe0, 0x72, 0x7f, 0x08, 0x3d, 0x13,
0xbf, 0x93, 0xe0, 0x2d, 0xb0, 0x7c, 0xaa, 0x12, 0x85, 0xfd, 0x5e, 0x82, 0x0a, 0x58, 0x28, 0x95,
0x6b, 0xeb, 0x46, 0xd1, 0xaa, 0x3d, 0x2d, 0xda, 0x1b, 0xb5, 0xaa, 0x8d, 0xcc, 0x6a, 0x55, 0xfe,
0xc5, 0x30, 0x0d, 0x25, 0xe1, 0x29, 0x95, 0x23, 0x67, 0x6d, 0xbd, 0x8c, 0x6a, 0x56, 0xf1, 0x89,
0x59, 0xa2, 0xc8, 0x4f, 0x86, 0xe1, 0x1c, 0x00, 0x14, 0x56, 0x29, 0x17, 0x4b, 0x76, 0x55, 0xfe,
0x6e, 0x0e, 0xce, 0x80, 0x09, 0xf3, 0x99, 0x6d, 0xa2, 0x92, 0x61, 0xc9, 0xff, 0xc8, 0xad, 0x1e,
0x80, 0x89, 0xf8, 0xd3, 0x02, 0x1c, 0x03, 0xc3, 0x9b, 0x4f, 0xe4, 0x21, 0x38, 0x09, 0x46, 0x2d,
0xd3, 0xa8, 0x9a, 0xb2, 0x04, 0x17, 0xc0, 0x9c, 0x69, 0x99, 0x05, 0xbb, 0x58, 0x2e, 0xd5, 0xd0,
0x76, 0xa9, 0xc4, 0x2e, 0x4f, 0x19, 0x4c, 0x3f, 0xa5, 0x4f, 0x7e, 0x6c, 0xc9, 0xc1, 0x45, 0x30,
0x6f, 0x95, 0x0b, 0x9b, 0x35, 0x64, 0x14, 0x4c, 0x14, 0x9b, 0x47, 0x28, 0x90, 0x09, 0xc5, 0x96,
0xd1, 0xd5, 0x3c, 0x18, 0x8f, 0xbe, 0x4b, 0xc0, 0x29, 0x30, 0xbe, 0xf9, 0xa4, 0xb6, 0x61, 0x54,
0x37, 0xe4, 0xa1, 0x1e, 0xd2, 0x7c, 0x56, 0x29, 0x22, 0x3a, 0x33, 0x00, 0x63, 0x27, 0x13, 0x4e,
0x83, 0x89, 0x52, 0xb9, 0x56, 0xd8, 0x30, 0x0b, 0x9b, 0x72, 0xee, 0xde, 0x43, 0x30, 0x69, 0x07,
0x8e, 0x17, 0xb6, 0xfc, 0x80, 0xc0, 0x7b, 0xe2, 0x60, 0x36, 0xfa, 0x3a, 0x1a, 0xfd, 0xe0, 0x7b,
0x65, 0xee, 0x64, 0xcc, 0x7f, 0x0b, 0xd4, 0x86, 0x56, 0xa4, 0xd7, 0xa4, 0xfc, 0x85, 0x17, 0x7f,
0x59, 0x1a, 0x7a, 0xf1, 0xf5, 0x92, 0xf4, 0xd5, 0xd7, 0x4b, 0xd2, 0x9f, 0xbf, 0x5e, 0x92, 0x7e,
0xf2, 0xd7, 0xa5, 0xa1, 0x9d, 0x31, 0xf6, 0x83, 0xf1, 0xfd, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff,
0x5c, 0x9f, 0x8c, 0x37, 0x79, 0x1e, 0x00, 0x00,
// 2852 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x59, 0xcb, 0x77, 0xdb, 0xc6,
0xf5, 0x36, 0x45, 0x49, 0x96, 0xae, 0x5e, 0xd4, 0xc8, 0xb2, 0xe1, 0x97, 0x20, 0xc3, 0x71, 0x7e,
0xb2, 0x12, 0xd8, 0xf9, 0xd9, 0x39, 0x79, 0x38, 0x4d, 0x1c, 0x90, 0x82, 0x2d, 0x56, 0x10, 0x49,
0x0f, 0x21, 0xdb, 0x59, 0xf1, 0x40, 0xe4, 0x48, 0xe2, 0x31, 0x05, 0x30, 0xc0, 0xd0, 0x91, 0xb2,
0xeb, 0xaa, 0xdb, 0x36, 0x7d, 0x9c, 0xf6, 0x9c, 0xae, 0xba, 0x6e, 0xda, 0x7f, 0xc3, 0x79, 0xb5,
0x69, 0xbb, 0x6a, 0x17, 0x3c, 0x6d, 0xba, 0xe9, 0xaa, 0x0b, 0x9e, 0xbe, 0x57, 0x3d, 0x33, 0x03,
0x88, 0x03, 0x80, 0x94, 0xb4, 0x92, 0xe6, 0xde, 0xef, 0xfb, 0xe6, 0xce, 0xdc, 0xc1, 0xdc, 0x0b,
0x10, 0xe6, 0xfc, 0x76, 0xbd, 0xbd, 0x7d, 0xdb, 0x6f, 0xd7, 0x6f, 0xb5, 0x7d, 0x8f, 0x7a, 0x68,
0x8c, 0x1b, 0x2e, 0xe9, 0xbb, 0x4d, 0xba, 0xd7, 0xd9, 0xbe, 0x55, 0xf7, 0xf6, 0x6f, 0xef, 0x7a,
0xbb, 0xde, 0x6d, 0xee, 0xdd, 0xee, 0xec, 0xf0, 0x11, 0x1f, 0xf0, 0xff, 0x04, 0x4b, 0xfb, 0x6e,
0x06, 0xce, 0x62, 0xf2, 0x61, 0x87, 0x04, 0x14, 0xdd, 0x82, 0xc9, 0x72, 0x9b, 0xf8, 0x0e, 0x6d,
0x7a, 0xae, 0x92, 0x59, 0xce, 0xac, 0xcc, 0xde, 0xc9, 0xdd, 0xe2, 0xaa, 0xb7, 0x8e, 0xec, 0xb8,
0x0f, 0x41, 0x37, 0x60, 0x7c, 0x93, 0xec, 0x6f, 0x13, 0x5f, 0x19, 0x59, 0xce, 0xac, 0x4c, 0xdd,
0x99, 0x09, 0xc1, 0xc2, 0x88, 0x43, 0x27, 0x83, 0xd9, 0x24, 0xa0, 0xc4, 0x57, 0xb2, 0x31, 0x98,
0x30, 0xe2, 0xd0, 0xa9, 0xfd, 0x75, 0x04, 0xa6, 0xab, 0xae, 0xd3, 0x0e, 0xf6, 0x3c, 0x5a, 0x74,
0x77, 0x3c, 0xb4, 0x04, 0x20, 0x14, 0x4a, 0xce, 0x3e, 0xe1, 0xf1, 0x4c, 0x62, 0xc9, 0x82, 0x56,
0x21, 0x27, 0x46, 0x85, 0x56, 0x93, 0xb8, 0x74, 0x0b, 0x5b, 0x81, 0x32, 0xb2, 0x9c, 0x5d, 0x99,
0xc4, 0x29, 0x3b, 0xd2, 0xfa, 0xda, 0x15, 0x87, 0xee, 0xf1, 0x48, 0x26, 0x71, 0xcc, 0xc6, 0xf4,
0xa2, 0xf1, 0x83, 0x66, 0x8b, 0x54, 0x9b, 0x1f, 0x13, 0x65, 0x94, 0xe3, 0x52, 0x76, 0xf4, 0x2a,
0xcc, 0x47, 0x36, 0xdb, 0xa3, 0x4e, 0x8b, 0x83, 0xc7, 0x38, 0x38, 0xed, 0x90, 0x95, 0xb9, 0x71,
0x83, 0x1c, 0x2a, 0xe3, 0xcb, 0x99, 0x95, 0x2c, 0x4e, 0xd9, 0xe5, 0x48, 0xd7, 0x9d, 0x60, 0x4f,
0x39, 0xcb, 0x71, 0x31, 0x9b, 0xac, 0x87, 0xc9, 0xf3, 0x66, 0xc0, 0xf2, 0x35, 0x11, 0xd7, 0x8b,
0xec, 0x08, 0xc1, 0xa8, 0xed, 0x79, 0xcf, 0x94, 0x49, 0x1e, 0x1c, 0xff, 0x5f, 0xfb, 0x59, 0x06,
0x26, 0x30, 0x09, 0xda, 0x9e, 0x1b, 0x10, 0xa4, 0xc0, 0xd9, 0x6a, 0xa7, 0x5e, 0x27, 0x41, 0xc0,
0xf7, 0x78, 0x02, 0x47, 0x43, 0x74, 0x1e, 0xc6, 0xab, 0xd4, 0xa1, 0x9d, 0x80, 0xe7, 0x77, 0x12,
0x87, 0x23, 0x29, 0xef, 0xd9, 0xe3, 0xf2, 0xfe, 0x66, 0x3c, 0x9f, 0x7c, 0x2f, 0xa7, 0xee, 0x2c,
0x84, 0x60, 0xd9, 0x85, 0x63, 0x40, 0xed, 0x93, 0xe9, 0x68, 0x02, 0xf4, 0x1a, 0x4c, 0x98, 0xb4,
0xde, 0x30, 0x0f, 0x48, 0x5d, 0x9c, 0x80, 0xfc, 0xb9, 0x5e, 0x57, 0xcd, 0x1d, 0x3a, 0xfb, 0xad,
0x7b, 0x1a, 0xa1, 0xf5, 0x86, 0x4e, 0x0e, 0x48, 0x5d, 0xc3, 0x47, 0x28, 0x74, 0x17, 0x26, 0x8d,
0x5d, 0xe2, 0x52, 0xa3, 0xd1, 0xf0, 0x95, 0x29, 0x4e, 0x59, 0xec, 0x75, 0xd5, 0x79, 0x41, 0x71,
0x98, 0x4b, 0x77, 0x1a, 0x0d, 0x5f, 0xc3, 0x7d, 0x1c, 0xb2, 0x60, 0xfe, 0x81, 0xd3, 0x6c, 0xb5,
0xbd, 0xa6, 0x4b, 0xd7, 0x6d, 0xbb, 0xc2, 0xc9, 0xd3, 0x9c, 0xbc, 0xd4, 0xeb, 0xaa, 0x97, 0x04,
0x79, 0x27, 0x82, 0xe8, 0x7b, 0x94, 0xb6, 0x43, 0x95, 0x34, 0x11, 0xe9, 0x70, 0x36, 0xef, 0x04,
0x64, 0xad, 0xe9, 0x2b, 0x84, 0x6b, 0x2c, 0xf4, 0xba, 0xea, 0x9c, 0xd0, 0xd8, 0x76, 0x02, 0xa2,
0x37, 0x9a, 0xbe, 0x86, 0x23, 0x0c, 0x7a, 0x08, 0x73, 0x2c, 0x7a, 0x71, 0x5a, 0x2b, 0xbe, 0x77,
0x70, 0xa8, 0x7c, 0xc6, 0x33, 0x91, 0xbf, 0xd2, 0xeb, 0xaa, 0x8a, 0xb4, 0xd6, 0x3a, 0x87, 0xe8,
0x6d, 0x86, 0xd1, 0x70, 0x92, 0x85, 0x0c, 0x98, 0x61, 0xa6, 0x0a, 0x21, 0xbe, 0x90, 0xf9, 0x5c,
0xc8, 0x5c, 0xea, 0x75, 0xd5, 0xf3, 0x92, 0x4c, 0x9b, 0x10, 0x3f, 0x12, 0x89, 0x33, 0x50, 0x05,
0x50, 0x5f, 0xd5, 0x74, 0x1b, 0x7c, 0x61, 0xca, 0xa7, 0x3c, 0xff, 0x79, 0xb5, 0xd7, 0x55, 0x2f,
0xa7, 0xc3, 0x21, 0x21, 0x4c, 0xc3, 0x03, 0xb8, 0xe8, 0xff, 0x61, 0x94, 0x59, 0x95, 0x5f, 0x8a,
0x3b, 0x62, 0x2a, 0x4c, 0x3f, 0xb3, 0xe5, 0xe7, 0x7a, 0x5d, 0x75, 0xaa, 0x2f, 0xa8, 0x61, 0x0e,
0x45, 0x79, 0x58, 0x64, 0x7f, 0xcb, 0x6e, 0xff, 0x30, 0x07, 0xd4, 0xf3, 0x89, 0xf2, 0xab, 0xb4,
0x06, 0x1e, 0x0c, 0x45, 0x6b, 0x30, 0x2b, 0x02, 0x29, 0x10, 0x9f, 0xae, 0x39, 0xd4, 0x51, 0xbe,
0xcf, 0x9f, 0xf9, 0xfc, 0xe5, 0x5e, 0x57, 0xbd, 0x20, 0xe6, 0x0c, 0xe3, 0xaf, 0x13, 0x9f, 0xea,
0x0d, 0x87, 0x3a, 0x1a, 0x4e, 0x70, 0xe2, 0x2a, 0xfc, 0xe2, 0xf8, 0xe4, 0x58, 0x95, 0xb6, 0x43,
0xf7, 0x62, 0x2a, 0xfc, 0x62, 0x31, 0x60, 0x46, 0x58, 0x36, 0xc8, 0x21, 0x0f, 0xe5, 0x07, 0x42,
0x44, 0xca, 0x4b, 0x28, 0xf2, 0x8c, 0x1c, 0x86, 0x91, 0xc4, 0x19, 0x31, 0x09, 0x1e, 0xc7, 0x0f,
0x8f, 0x93, 0x10, 0x61, 0xc4, 0x19, 0xc8, 0x86, 0x05, 0x61, 0xb0, 0xfd, 0x4e, 0x40, 0x49, 0xa3,
0x60, 0xf0, 0x58, 0x7e, 0x24, 0x84, 0xae, 0xf5, 0xba, 0xea, 0xd5, 0x98, 0x10, 0x15, 0x30, 0xbd,
0xee, 0x84, 0x21, 0x0d, 0xa2, 0x0f, 0x50, 0xe5, 0xe1, 0xfd, 0xf8, 0x14, 0xaa, 0x22, 0xca, 0x41,
0x74, 0xf4, 0x1e, 0x4c, 0xb3, 0x33, 0x79, 0x94, 0xbb, 0x7f, 0x08, 0xb9, 0x8b, 0xbd, 0xae, 0xba,
0x28, 0xe4, 0xf8, 0x19, 0x96, 0x32, 0x17, 0xc3, 0xcb, 0x7c, 0x1e, 0xce, 0x3f, 0x8f, 0xe1, 0x8b,
0x30, 0x62, 0x78, 0xf4, 0x0e, 0x4c, 0xb1, 0x71, 0x94, 0xaf, 0x7f, 0x09, 0xba, 0xd2, 0xeb, 0xaa,
0xe7, 0x24, 0x7a, 0x3f, 0x5b, 0x32, 0x5a, 0x22, 0xf3, 0xb9, 0xff, 0x3d, 0x9c, 0x2c, 0xa6, 0x96,
0xd1, 0xa8, 0x04, 0xf3, 0x6c, 0x18, 0xcf, 0xd1, 0x7f, 0xb2, 0xc9, 0xe7, 0x8f, 0x4b, 0xa4, 0x32,
0x94, 0xa6, 0xa6, 0xf4, 0x78, 0x48, 0xff, 0x3d, 0x51, 0x4f, 0x44, 0x96, 0xa6, 0xa2, 0x77, 0x13,
0x85, 0xf4, 0x0f, 0xa3, 0xc9, 0xd5, 0x05, 0xa1, 0x3b, 0xda, 0xd8, 0x58, 0x8d, 0x7d, 0x2b, 0x51,
0x13, 0xfe, 0x78, 0xea, 0xa2, 0xf0, 0xf3, 0xe9, 0xa8, 0x8d, 0x60, 0xf7, 0x2b, 0x5b, 0x1b, 0xbb,
0x5f, 0x33, 0xc9, 0xfb, 0x95, 0x6d, 0x44, 0x78, 0xbf, 0x86, 0x18, 0xf4, 0x2a, 0x9c, 0x2d, 0x11,
0xfa, 0x91, 0xe7, 0x3f, 0x13, 0x75, 0x2c, 0x8f, 0x7a, 0x5d, 0x75, 0x56, 0xc0, 0x5d, 0xe1, 0xd0,
0x70, 0x04, 0x41, 0xd7, 0x61, 0x94, 0xdf, 0xfe, 0x62, 0x8b, 0xa4, 0x1b, 0x4a, 0x5c, 0xf7, 0xdc,
0x89, 0x0a, 0x30, 0xbb, 0x46, 0x5a, 0xce, 0xa1, 0xe5, 0x50, 0xe2, 0xd6, 0x0f, 0x37, 0x03, 0x5e,
0x69, 0x66, 0xe4, 0x6b, 0xa1, 0xc1, 0xfc, 0x7a, 0x4b, 0x00, 0xf4, 0xfd, 0x40, 0xc3, 0x09, 0x0a,
0xfa, 0x36, 0xe4, 0xe2, 0x16, 0xfc, 0x9c, 0xd7, 0x9c, 0x19, 0xb9, 0xe6, 0x24, 0x65, 0x74, 0xff,
0xb9, 0x86, 0x53, 0x3c, 0xf4, 0x01, 0x2c, 0x6e, 0xb5, 0x1b, 0x0e, 0x25, 0x8d, 0x44, 0x5c, 0x33,
0x5c, 0xf0, 0x7a, 0xaf, 0xab, 0xaa, 0x42, 0xb0, 0x23, 0x60, 0x7a, 0x3a, 0xbe, 0xc1, 0x0a, 0xe8,
0x0d, 0x00, 0xec, 0x75, 0xdc, 0x86, 0xd5, 0xdc, 0x6f, 0x52, 0x65, 0x71, 0x39, 0xb3, 0x32, 0x96,
0x3f, 0xdf, 0xeb, 0xaa, 0x48, 0xe8, 0xf9, 0xcc, 0xa7, 0xb7, 0x98, 0x53, 0xc3, 0x12, 0x12, 0xe5,
0x61, 0xd6, 0x3c, 0x68, 0xd2, 0xb2, 0x5b, 0x70, 0x02, 0xc2, 0x8a, 0xa4, 0x72, 0x3e, 0x55, 0x8d,
0x0e, 0x9a, 0x54, 0xf7, 0x5c, 0x9d, 0x15, 0xd6, 0x8e, 0x4f, 0x34, 0x9c, 0x60, 0xa0, 0xb7, 0x61,
0xca, 0x74, 0x9d, 0xed, 0x16, 0xa9, 0xb4, 0x7d, 0x6f, 0x47, 0xb9, 0xc0, 0x05, 0x2e, 0xf4, 0xba,
0xea, 0x42, 0x28, 0xc0, 0x9d, 0x7a, 0x9b, 0x79, 0x35, 0x2c, 0x63, 0xd1, 0x3d, 0x98, 0x62, 0x32,
0x7c, 0x31, 0x9b, 0x81, 0xa2, 0xf2, 0x7d, 0x90, 0x8e, 0x69, 0x9d, 0x17, 0x62, 0xbe, 0x09, 0x6c,
0xf1, 0x32, 0x98, 0x4d, 0xcb, 0x86, 0xd5, 0xbd, 0xce, 0xce, 0x4e, 0x8b, 0x28, 0xcb, 0xc9, 0x69,
0x39, 0x37, 0x10, 0xde, 0x90, 0x1a, 0x62, 0xd1, 0xcb, 0x30, 0xc6, 0x86, 0x81, 0x72, 0x8d, 0x75,
0xa2, 0xf9, 0x5c, 0xaf, 0xab, 0x4e, 0xf7, 0x49, 0x81, 0x86, 0x85, 0x1b, 0x6d, 0x48, 0x1d, 0x47,
0xc1, 0xdb, 0xdf, 0x77, 0xdc, 0x46, 0xa0, 0x68, 0x9c, 0x73, 0xb5, 0xd7, 0x55, 0x2f, 0x26, 0x3b,
0x8e, 0x7a, 0x88, 0x91, 0x1b, 0x8e, 0x88, 0xc7, 0x8e, 0x23, 0xee, 0xb8, 0x2e, 0xf1, 0x59, 0x07,
0xc4, 0x1f, 0xcb, 0x9b, 0xc9, 0x2a, 0xe5, 0x73, 0x3f, 0xef, 0x96, 0xa2, 0x2a, 0x15, 0xa7, 0xa0,
0x22, 0xe4, 0xcc, 0x03, 0x4a, 0x7c, 0xd7, 0x69, 0x1d, 0xc9, 0xac, 0x72, 0x19, 0x29, 0x20, 0x12,
0x22, 0x64, 0xa1, 0x14, 0x0d, 0xdd, 0x81, 0xc9, 0x2a, 0xf5, 0x49, 0x10, 0x10, 0x3f, 0x50, 0x08,
0x5f, 0x94, 0xd4, 0xb6, 0x05, 0x91, 0x4b, 0xc3, 0x7d, 0x18, 0xba, 0x0d, 0x13, 0x85, 0x3d, 0x52,
0x7f, 0xc6, 0x28, 0x3b, 0x9c, 0x22, 0x3d, 0xd5, 0xf5, 0xd0, 0xa3, 0xe1, 0x23, 0x10, 0x2b, 0x89,
0x82, 0xbd, 0x41, 0x0e, 0x79, 0xfb, 0xcd, 0x9b, 0xa6, 0x31, 0xf9, 0x7c, 0x89, 0x99, 0xf8, 0x55,
0x1b, 0x34, 0x3f, 0x26, 0x1a, 0x8e, 0x33, 0xd0, 0x23, 0x40, 0x31, 0x83, 0xe5, 0xf8, 0xbb, 0x44,
0x74, 0x4d, 0x63, 0xf9, 0xe5, 0x5e, 0x57, 0xbd, 0x32, 0x50, 0x47, 0x6f, 0x31, 0x9c, 0x86, 0x07,
0x90, 0xd1, 0x13, 0x38, 0xd7, 0xb7, 0x76, 0x76, 0x76, 0x9a, 0x07, 0xd8, 0x71, 0x77, 0x89, 0xf2,
0x85, 0x10, 0xd5, 0x7a, 0x5d, 0x75, 0x29, 0x2d, 0xca, 0x81, 0xba, 0xcf, 0x90, 0x1a, 0x1e, 0x28,
0x80, 0x1c, 0xb8, 0x30, 0xc8, 0x6e, 0x1f, 0xb8, 0xca, 0x97, 0x42, 0xfb, 0xe5, 0x5e, 0x57, 0xd5,
0x8e, 0xd5, 0xd6, 0xe9, 0x81, 0xab, 0xe1, 0x61, 0x3a, 0x68, 0x1d, 0xe6, 0x8e, 0x5c, 0xf6, 0x81,
0x5b, 0x6e, 0x07, 0xca, 0x57, 0x42, 0x5a, 0x3a, 0x01, 0x92, 0x34, 0x3d, 0x70, 0x75, 0xaf, 0x1d,
0x68, 0x38, 0x49, 0x43, 0xef, 0x47, 0xb9, 0x11, 0xc5, 0x3d, 0x10, 0x1d, 0xe4, 0x98, 0x5c, 0x80,
0x43, 0x1d, 0xd1, 0x16, 0x04, 0x47, 0xa9, 0x09, 0x09, 0xe8, 0xf5, 0xe8, 0x08, 0x3d, 0xaa, 0x54,
0x45, 0xef, 0x38, 0x26, 0xf7, 0xf1, 0x21, 0xfb, 0xc3, 0x76, 0xff, 0x10, 0x3d, 0xaa, 0x54, 0xb5,
0xef, 0xcc, 0x89, 0x6e, 0x93, 0xdd, 0xe2, 0xfd, 0xb7, 0x46, 0xf9, 0x16, 0x77, 0x9d, 0x7d, 0xa2,
0x61, 0xee, 0x94, 0xeb, 0xc8, 0xc8, 0x29, 0xea, 0xc8, 0x2a, 0x8c, 0x3f, 0x31, 0x2c, 0x86, 0xce,
0x26, 0xcb, 0xc8, 0x47, 0x4e, 0x4b, 0x80, 0x43, 0x04, 0x2a, 0xc3, 0xc2, 0x3a, 0x71, 0x7c, 0xba,
0x4d, 0x1c, 0x5a, 0x74, 0x29, 0xf1, 0x9f, 0x3b, 0xad, 0xb0, 0x4a, 0x64, 0xe5, 0xdd, 0xdc, 0x8b,
0x40, 0x7a, 0x33, 0x44, 0x69, 0x78, 0x10, 0x13, 0x15, 0x61, 0xde, 0x6c, 0x91, 0x3a, 0x7b, 0xef,
0xb6, 0x9b, 0xfb, 0xc4, 0xeb, 0xd0, 0xcd, 0x80, 0x57, 0x8b, 0xac, 0xfc, 0x94, 0x93, 0x10, 0xa2,
0x53, 0x81, 0xd1, 0x70, 0x9a, 0xc5, 0x1e, 0x74, 0xab, 0x19, 0x50, 0xe2, 0x4a, 0xef, 0xcd, 0x8b,
0xc9, 0x9b, 0xa7, 0xc5, 0x11, 0x51, 0x8b, 0xdf, 0xf1, 0x5b, 0x81, 0x86, 0x53, 0x34, 0x84, 0x61,
0xc1, 0x68, 0x3c, 0x27, 0x3e, 0x6d, 0x06, 0x44, 0x52, 0x3b, 0xcf, 0xd5, 0xa4, 0x07, 0xc8, 0x89,
0x40, 0x71, 0xc1, 0x41, 0x64, 0xf4, 0x76, 0xd4, 0xea, 0x1a, 0x1d, 0xea, 0xd9, 0x56, 0x35, 0xbc,
0xf5, 0xa5, 0xdc, 0x38, 0x1d, 0xea, 0xe9, 0x94, 0x09, 0xc4, 0x91, 0xec, 0x1e, 0xec, 0xb7, 0xde,
0x46, 0x87, 0xee, 0x29, 0x0a, 0xe7, 0x0e, 0xe9, 0xd6, 0x9d, 0x4e, 0xa2, 0x5b, 0x67, 0x14, 0xf4,
0x2d, 0x59, 0x84, 0xbd, 0xf0, 0x2b, 0x17, 0x93, 0x2f, 0x9e, 0x9c, 0xbd, 0xd3, 0x64, 0x97, 0x7f,
0x02, 0xdb, 0x8f, 0x7e, 0x83, 0x1c, 0x72, 0xf2, 0xa5, 0xe4, 0xc9, 0x62, 0x4f, 0x8e, 0xe0, 0xc6,
0x91, 0xc8, 0x4a, 0xb5, 0xd2, 0x5c, 0xe0, 0x72, 0xb2, 0xd1, 0x97, 0xda, 0x34, 0xa1, 0x33, 0x88,
0xc6, 0xf6, 0x42, 0xa4, 0x8b, 0xf5, 0x70, 0x3c, 0x2b, 0x2a, 0xcf, 0x8a, 0xb4, 0x17, 0x61, 0x8e,
0x79, 0xef, 0x27, 0x12, 0x92, 0xa0, 0x20, 0x1b, 0xe6, 0x8f, 0x52, 0x74, 0xa4, 0xb3, 0xcc, 0x75,
0xa4, 0xdb, 0xa6, 0xe9, 0x36, 0x69, 0xd3, 0x69, 0xe9, 0xfd, 0x2c, 0x4b, 0x92, 0x69, 0x01, 0x56,
0x9a, 0xd9, 0xff, 0x51, 0x7e, 0xaf, 0xf1, 0x1c, 0x25, 0xfb, 0xe3, 0x7e, 0x92, 0x65, 0x30, 0x7b,
0x41, 0xe5, 0x9d, 0x7a, 0x3c, 0xcd, 0x1a, 0x97, 0x90, 0x0e, 0x9c, 0x68, 0xef, 0x53, 0xb9, 0x1e,
0xc0, 0x65, 0x1d, 0x6d, 0xd4, 0xfb, 0xf3, 0xfd, 0xbe, 0x3e, 0xfc, 0x55, 0x41, 0x6c, 0x77, 0x0c,
0x1e, 0x2d, 0x26, 0x4a, 0xf7, 0x4b, 0x43, 0x9b, 0x7d, 0x41, 0x96, 0xc1, 0x68, 0x33, 0xd1, 0x9c,
0x73, 0x85, 0x1b, 0x27, 0xf5, 0xe6, 0x42, 0x28, 0xcd, 0x64, 0x1d, 0x57, 0x51, 0xa4, 0xa2, 0xd0,
0xea, 0xf0, 0x0f, 0x6e, 0x37, 0x93, 0x67, 0x27, 0x4a, 0x55, 0x5d, 0x00, 0x34, 0x9c, 0x60, 0xb0,
0x27, 0x3a, 0x6e, 0xa9, 0x52, 0x87, 0x92, 0xb0, 0x11, 0x90, 0x36, 0x38, 0x21, 0xa4, 0x07, 0x0c,
0xa6, 0xe1, 0x41, 0xe4, 0xb4, 0xa6, 0xed, 0x3d, 0x23, 0xae, 0xf2, 0xca, 0x49, 0x9a, 0x94, 0xc1,
0x52, 0x9a, 0x9c, 0x8c, 0xee, 0xc3, 0x4c, 0xf4, 0x7a, 0x50, 0xf0, 0x3a, 0x2e, 0x55, 0xee, 0xf2,
0xbb, 0x50, 0x2e, 0x30, 0xd1, 0x7b, 0x48, 0x9d, 0xf9, 0x59, 0x81, 0x91, 0xf1, 0xc8, 0x82, 0xf9,
0x47, 0x1d, 0x8f, 0x3a, 0x79, 0xa7, 0xfe, 0x8c, 0xb8, 0x8d, 0xfc, 0x21, 0x25, 0x81, 0xf2, 0x3a,
0x17, 0x91, 0xda, 0xef, 0x0f, 0x19, 0x44, 0xdf, 0x16, 0x18, 0x7d, 0x9b, 0x81, 0x34, 0x9c, 0x26,
0xb2, 0x52, 0x52, 0xf1, 0xc9, 0x63, 0x8f, 0x12, 0xe5, 0x7e, 0xf2, 0xba, 0x6a, 0xfb, 0x44, 0x7f,
0xee, 0xb1, 0xdd, 0x89, 0x30, 0xf2, 0x8e, 0x78, 0xbe, 0xdf, 0x69, 0x53, 0xde, 0xd5, 0x28, 0xef,
0x27, 0x8f, 0xf1, 0xd1, 0x8e, 0x08, 0x94, 0xce, 0xfb, 0x20, 0x69, 0x47, 0x24, 0x32, 0xba, 0x09,
0xe3, 0x96, 0xb7, 0xbb, 0x4b, 0x7c, 0xe5, 0x21, 0xdf, 0xd8, 0xf9, 0x5e, 0x57, 0x9d, 0x09, 0x1f,
0x74, 0x6e, 0xd7, 0x70, 0x08, 0x40, 0x77, 0x61, 0xd2, 0xf2, 0x76, 0xcb, 0x1d, 0xda, 0xee, 0x50,
0x65, 0x3d, 0xf9, 0x8d, 0xac, 0xe5, 0xed, 0xea, 0x1e, 0xf7, 0x69, 0xb8, 0x8f, 0x63, 0x9d, 0xed,
0x1a, 0xd9, 0xee, 0xec, 0x2a, 0x45, 0x1e, 0xa5, 0xd4, 0xd9, 0x36, 0x98, 0x59, 0xc3, 0xc2, 0xbd,
0xfa, 0xd3, 0xac, 0xf4, 0x19, 0x19, 0xcd, 0xc1, 0x54, 0xa9, 0x6c, 0xd7, 0xaa, 0xb6, 0x81, 0x6d,
0x73, 0x2d, 0x77, 0x06, 0x9d, 0x07, 0x54, 0x2c, 0x15, 0xed, 0xa2, 0x61, 0x09, 0x63, 0xcd, 0xb4,
0x0b, 0x6b, 0x39, 0x40, 0x39, 0x98, 0xc6, 0xa6, 0x64, 0x99, 0x62, 0x96, 0x6a, 0xf1, 0xa1, 0x6d,
0xe2, 0x4d, 0x61, 0x39, 0x87, 0x96, 0xe1, 0x4a, 0xb5, 0xf8, 0xf0, 0xd1, 0x56, 0x51, 0x60, 0x6a,
0x46, 0x69, 0xad, 0x86, 0xcd, 0xcd, 0xf2, 0x63, 0xb3, 0xb6, 0x66, 0xd8, 0x46, 0x6e, 0x11, 0xcd,
0xc3, 0x4c, 0xd5, 0x78, 0x6c, 0xd6, 0xaa, 0x25, 0xa3, 0x52, 0x5d, 0x2f, 0xdb, 0xb9, 0x25, 0x74,
0x0d, 0xae, 0x32, 0xe1, 0x32, 0x36, 0x6b, 0xd1, 0x04, 0x0f, 0x70, 0x79, 0xb3, 0x0f, 0x51, 0xd1,
0x45, 0x58, 0x1c, 0xec, 0x5a, 0x66, 0xec, 0xd4, 0x94, 0x06, 0x2e, 0xac, 0x17, 0xa3, 0x39, 0x57,
0xd0, 0x6d, 0x78, 0xe5, 0xb8, 0xa8, 0xf8, 0xb8, 0x6a, 0x97, 0x2b, 0x35, 0xe3, 0xa1, 0x59, 0xb2,
0x73, 0x37, 0xd1, 0x55, 0xb8, 0x98, 0xb7, 0x8c, 0xc2, 0xc6, 0x7a, 0xd9, 0x32, 0x6b, 0x15, 0xd3,
0xc4, 0xb5, 0x4a, 0x19, 0xdb, 0x35, 0xfb, 0x69, 0x0d, 0x3f, 0xcd, 0x35, 0x90, 0x0a, 0x97, 0xb7,
0x4a, 0xc3, 0x01, 0x04, 0x5d, 0x82, 0xc5, 0x35, 0xd3, 0x32, 0x3e, 0x48, 0xb9, 0x5e, 0x64, 0xd0,
0x15, 0xb8, 0xb0, 0x55, 0x1a, 0xec, 0xfd, 0x2c, 0xb3, 0xfa, 0x37, 0x80, 0x51, 0xf6, 0xfe, 0x81,
0x14, 0x38, 0x17, 0xed, 0x6d, 0xb9, 0x64, 0xd6, 0x1e, 0x94, 0x2d, 0xab, 0xfc, 0xc4, 0xc4, 0xb9,
0x33, 0xe1, 0x6a, 0x52, 0x9e, 0xda, 0x56, 0xc9, 0x2e, 0x5a, 0x35, 0x1b, 0x17, 0x1f, 0x3e, 0x34,
0x71, 0x7f, 0x87, 0x32, 0x08, 0xc1, 0x6c, 0x44, 0xb0, 0x4c, 0x63, 0xcd, 0xc4, 0xb9, 0x11, 0x74,
0x13, 0x6e, 0xc4, 0x6d, 0xc3, 0xe8, 0x59, 0x99, 0xfe, 0x68, 0xab, 0x8c, 0xb7, 0x36, 0x73, 0xa3,
0xec, 0xd0, 0x44, 0x36, 0xc3, 0xb2, 0x72, 0x63, 0xe8, 0x3a, 0xa8, 0xd1, 0x16, 0x4b, 0xbb, 0x1b,
0x8b, 0x1c, 0xd0, 0x3d, 0x78, 0xe3, 0x04, 0xd0, 0xb0, 0x28, 0xa6, 0x58, 0x4a, 0x06, 0x70, 0xc3,
0xf5, 0x4c, 0xa3, 0xd7, 0xe1, 0xb5, 0xa1, 0xee, 0x61, 0xa2, 0x33, 0xe8, 0x01, 0xe4, 0x07, 0xb0,
0xc4, 0x2a, 0x43, 0x8b, 0x38, 0x97, 0xa1, 0x50, 0x44, 0x0d, 0x0f, 0x61, 0x01, 0x1b, 0x76, 0x61,
0x3d, 0x37, 0x8b, 0x56, 0xe1, 0xe5, 0xa1, 0xc7, 0x21, 0xbe, 0x09, 0x0d, 0x64, 0xc0, 0xbb, 0xa7,
0xc3, 0x0e, 0x0b, 0x9b, 0xa0, 0x97, 0x60, 0x79, 0xb8, 0x44, 0xb8, 0x25, 0x3b, 0xe8, 0x1d, 0x78,
0xf3, 0x24, 0xd4, 0xb0, 0x29, 0x76, 0x8f, 0x9f, 0x22, 0x3c, 0x06, 0x7b, 0xec, 0xd9, 0x1b, 0x8e,
0x62, 0x07, 0xa3, 0x89, 0xfe, 0x0f, 0xb4, 0x81, 0x87, 0x3d, 0xbe, 0x2d, 0x2f, 0x32, 0xe8, 0x16,
0xdc, 0xc4, 0x46, 0x69, 0xad, 0xbc, 0x59, 0x3b, 0x05, 0xfe, 0xb3, 0x0c, 0x7a, 0x0f, 0xde, 0x3e,
0x19, 0x38, 0x6c, 0x81, 0x9f, 0x67, 0x90, 0x09, 0xef, 0x9f, 0x7a, 0xbe, 0x61, 0x32, 0x5f, 0x64,
0xd0, 0x35, 0xb8, 0x32, 0x98, 0x1f, 0xe6, 0xe1, 0xcb, 0x0c, 0x5a, 0x81, 0xeb, 0xc7, 0xce, 0x14,
0x22, 0xbf, 0xca, 0xa0, 0xb7, 0xe0, 0xee, 0x71, 0x90, 0x61, 0x61, 0xfc, 0x3a, 0x83, 0xee, 0xc3,
0xbd, 0x53, 0xcc, 0x31, 0x4c, 0xe0, 0x37, 0xc7, 0xac, 0x23, 0x4c, 0xf6, 0xd7, 0x27, 0xaf, 0x23,
0x44, 0xfe, 0x36, 0x83, 0x96, 0xe0, 0xe2, 0x60, 0x08, 0x3b, 0x13, 0xbf, 0xcb, 0xa0, 0x1b, 0xb0,
0x7c, 0xac, 0x12, 0x83, 0xfd, 0x3e, 0x83, 0x14, 0x58, 0x28, 0x95, 0x6b, 0x0f, 0x8c, 0xa2, 0x55,
0x7b, 0x52, 0xb4, 0xd7, 0x6b, 0x55, 0x1b, 0x9b, 0xd5, 0x6a, 0xee, 0x17, 0x23, 0x2c, 0x94, 0x98,
0xa7, 0x54, 0x0e, 0x9d, 0xb5, 0x07, 0x65, 0x5c, 0xb3, 0x8a, 0x8f, 0xcd, 0x12, 0x43, 0x7e, 0x3a,
0x82, 0xe6, 0x00, 0x18, 0xac, 0x52, 0x2e, 0x96, 0xec, 0x6a, 0xee, 0x7b, 0x59, 0x34, 0x03, 0x13,
0xe6, 0x53, 0xdb, 0xc4, 0x25, 0xc3, 0xca, 0xfd, 0x3d, 0xbb, 0xba, 0x0f, 0x13, 0xd1, 0x27, 0x0e,
0x34, 0x0e, 0x23, 0x1b, 0x8f, 0x73, 0x67, 0xd0, 0x24, 0x8c, 0x59, 0xa6, 0x51, 0x35, 0x73, 0x19,
0xb4, 0x00, 0x73, 0xa6, 0x65, 0x16, 0xec, 0x62, 0xb9, 0x54, 0xc3, 0x5b, 0xa5, 0x12, 0xbf, 0x3c,
0x73, 0x30, 0xfd, 0x84, 0x3d, 0xf9, 0x91, 0x25, 0x8b, 0x16, 0x61, 0xde, 0x2a, 0x17, 0x36, 0x6a,
0xd8, 0x28, 0x98, 0x38, 0x32, 0x8f, 0x32, 0x20, 0x17, 0x8a, 0x2c, 0x63, 0xab, 0x79, 0x38, 0x1b,
0x7e, 0x1f, 0x41, 0x53, 0x70, 0x76, 0xe3, 0x71, 0x6d, 0xdd, 0xa8, 0xae, 0xe7, 0xce, 0xf4, 0x91,
0xe6, 0xd3, 0x4a, 0x11, 0xb3, 0x99, 0x01, 0xc6, 0x8f, 0x26, 0x9c, 0x86, 0x89, 0x52, 0xb9, 0x56,
0x58, 0x37, 0x0b, 0x1b, 0xb9, 0xec, 0x9d, 0xfb, 0x30, 0x69, 0xfb, 0x8e, 0x1b, 0xb4, 0x3d, 0x9f,
0xa2, 0x3b, 0xf2, 0x60, 0x36, 0xfc, 0x4a, 0x1b, 0xfe, 0x5e, 0x7c, 0x69, 0xee, 0x68, 0x2c, 0x7e,
0x4a, 0xd4, 0xce, 0xac, 0x64, 0x5e, 0xcb, 0xe4, 0xcf, 0xbd, 0xf8, 0xf3, 0xd2, 0x99, 0x17, 0xdf,
0x2c, 0x65, 0xbe, 0xfe, 0x66, 0x29, 0xf3, 0xa7, 0x6f, 0x96, 0x32, 0x3f, 0xf9, 0xcb, 0xd2, 0x99,
0xed, 0x71, 0xfe, 0x7b, 0xf3, 0xdd, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x2a, 0x7b, 0xc4, 0x2d,
0xb8, 0x1e, 0x00, 0x00,
}

View File

@ -45,9 +45,8 @@ service Transport {
}
message Member {
// EtcdExecPath is the executable etcd binary path in agent server.
string EtcdExecPath = 1 [(gogoproto.moretags) = "yaml:\"etcd-exec-path\""];
// TODO: support embedded etcd
// EtcdExec is the executable etcd binary path in agent server.
string EtcdExec = 1 [(gogoproto.moretags) = "yaml:\"etcd-exec\""];
// AgentAddr is the agent HTTP server address.
string AgentAddr = 11 [(gogoproto.moretags) = "yaml:\"agent-addr\""];
@ -56,8 +55,6 @@ message Member {
// BaseDir is the base directory where all logs and etcd data are stored.
string BaseDir = 101 [(gogoproto.moretags) = "yaml:\"base-dir\""];
// EtcdLogPath is the log file to store current etcd server logs.
string EtcdLogPath = 102 [(gogoproto.moretags) = "yaml:\"etcd-log-path\""];
// EtcdClientProxy is true when client traffic needs to be proxied.
// If true, listen client URL port must be different than advertise client URL port.
@ -204,6 +201,11 @@ message Etcd {
bool PreVote = 63 [(gogoproto.moretags) = "yaml:\"pre-vote\""];
bool InitialCorruptCheck = 64 [(gogoproto.moretags) = "yaml:\"initial-corrupt-check\""];
string Logger = 71 [(gogoproto.moretags) = "yaml:\"logger\""];
// LogOutput is the log file to store current etcd server logs.
string LogOutput = 72 [(gogoproto.moretags) = "yaml:\"log-output\""];
bool Debug = 73 [(gogoproto.moretags) = "yaml:\"debug\""];
}
enum Operation {

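The Etcd message above gains three logging fields: Logger selects the logging backend, LogOutput replaces the removed Member.EtcdLogPath, and Debug toggles debug-level logging. A minimal sketch of populating them from Go; the values are illustrative, mirroring the test fixtures later in this change:

package main

import (
	"fmt"

	"github.com/coreos/etcd/functional/rpcpb"
)

func main() {
	// Illustrative values; LogOutput takes over the role of the
	// removed Member.EtcdLogPath field.
	e := rpcpb.Etcd{
		Name:                "s1",
		PreVote:             true,
		InitialCorruptCheck: true,
		Logger:              "zap",
		LogOutput:           "/tmp/etcd-functional-1/etcd.log",
		Debug:               true,
	}
	fmt.Println(e.Logger, e.LogOutput)
}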
View File

@ -48,10 +48,6 @@ func read(lg *zap.Logger, fpath string) (*Cluster, error) {
if mem.BaseDir == "" {
return nil, fmt.Errorf("BaseDir cannot be empty (got %q)", mem.BaseDir)
}
if mem.EtcdLogPath == "" {
return nil, fmt.Errorf("EtcdLogPath cannot be empty (got %q)", mem.EtcdLogPath)
}
if mem.Etcd.Name == "" {
return nil, fmt.Errorf("'--name' cannot be empty (got %+v)", mem)
}
@ -132,9 +128,6 @@ func read(lg *zap.Logger, fpath string) (*Cluster, error) {
}
}
if !strings.HasPrefix(mem.EtcdLogPath, mem.BaseDir) {
return nil, fmt.Errorf("EtcdLogPath must be prefixed with BaseDir (got %q)", mem.EtcdLogPath)
}
if !strings.HasPrefix(mem.Etcd.DataDir, mem.BaseDir) {
return nil, fmt.Errorf("Etcd.DataDir must be prefixed with BaseDir (got %q)", mem.Etcd.DataDir)
}
@ -317,6 +310,13 @@ func read(lg *zap.Logger, fpath string) (*Cluster, error) {
}
clus.Members[i].ClientCertData = string(data)
}
if mem.Etcd.LogOutput == "" {
return nil, fmt.Errorf("mem.Etcd.LogOutput cannot be empty")
}
if !strings.HasPrefix(mem.Etcd.LogOutput, mem.BaseDir) {
return nil, fmt.Errorf("LogOutput %q must be prefixed with BaseDir %q", mem.Etcd.LogOutput, mem.BaseDir)
}
}
}

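The two checks added above move log-path validation from the member level (the removed EtcdLogPath checks) to the etcd config level. A self-contained restatement of the same invariant, with a hypothetical helper name; the real checks are inlined in the cluster-reading loop:

package main

import (
	"fmt"
	"strings"
)

// validateLogOutput mirrors the checks added to read above: LogOutput
// must be set and must live under the member's base directory.
func validateLogOutput(logOutput, baseDir string) error {
	if logOutput == "" {
		return fmt.Errorf("mem.Etcd.LogOutput cannot be empty")
	}
	if !strings.HasPrefix(logOutput, baseDir) {
		return fmt.Errorf("LogOutput %q must be prefixed with BaseDir %q", logOutput, baseDir)
	}
	return nil
}

func main() {
	fmt.Println(validateLogOutput("/tmp/etcd-functional-1/etcd.log", "/tmp/etcd-functional-1")) // <nil>
}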
View File

@ -28,11 +28,10 @@ func Test_read(t *testing.T) {
exp := &Cluster{
Members: []*rpcpb.Member{
{
EtcdExecPath: "./bin/etcd",
EtcdExec: "./bin/etcd",
AgentAddr: "127.0.0.1:19027",
FailpointHTTPAddr: "http://127.0.0.1:7381",
BaseDir: "/tmp/etcd-functional-1",
EtcdLogPath: "/tmp/etcd-functional-1/etcd.log",
EtcdClientProxy: false,
EtcdPeerProxy: true,
EtcdClientEndpoint: "127.0.0.1:1379",
@ -63,6 +62,9 @@ func Test_read(t *testing.T) {
QuotaBackendBytes: 10740000000,
PreVote: true,
InitialCorruptCheck: true,
Logger: "zap",
LogOutput: "/tmp/etcd-functional-1/etcd.log",
Debug: true,
},
ClientCertData: "",
ClientCertPath: "",
@ -79,11 +81,10 @@ func Test_read(t *testing.T) {
SnapshotPath: "/tmp/etcd-functional-1.snapshot.db",
},
{
EtcdExecPath: "./bin/etcd",
EtcdExec: "./bin/etcd",
AgentAddr: "127.0.0.1:29027",
FailpointHTTPAddr: "http://127.0.0.1:7382",
BaseDir: "/tmp/etcd-functional-2",
EtcdLogPath: "/tmp/etcd-functional-2/etcd.log",
EtcdClientProxy: false,
EtcdPeerProxy: true,
EtcdClientEndpoint: "127.0.0.1:2379",
@ -114,6 +115,9 @@ func Test_read(t *testing.T) {
QuotaBackendBytes: 10740000000,
PreVote: true,
InitialCorruptCheck: true,
Logger: "zap",
LogOutput: "/tmp/etcd-functional-2/etcd.log",
Debug: true,
},
ClientCertData: "",
ClientCertPath: "",
@ -130,11 +134,10 @@ func Test_read(t *testing.T) {
SnapshotPath: "/tmp/etcd-functional-2.snapshot.db",
},
{
EtcdExecPath: "./bin/etcd",
EtcdExec: "./bin/etcd",
AgentAddr: "127.0.0.1:39027",
FailpointHTTPAddr: "http://127.0.0.1:7383",
BaseDir: "/tmp/etcd-functional-3",
EtcdLogPath: "/tmp/etcd-functional-3/etcd.log",
EtcdClientProxy: false,
EtcdPeerProxy: true,
EtcdClientEndpoint: "127.0.0.1:3379",
@ -165,6 +168,9 @@ func Test_read(t *testing.T) {
QuotaBackendBytes: 10740000000,
PreVote: true,
InitialCorruptCheck: true,
Logger: "zap",
LogOutput: "/tmp/etcd-functional-3/etcd.log",
Debug: true,
},
ClientCertData: "",
ClientCertPath: "",

View File

@ -27,6 +27,7 @@ import (
"github.com/coreos/etcd/etcdserver"
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
"github.com/coreos/etcd/functional/rpcpb"
"github.com/coreos/etcd/raft"
"go.uber.org/zap"
"golang.org/x/time/rate"
@ -151,6 +152,8 @@ func (s *keyStresser) run() {
// capability check has not been done (in the beginning)
case rpctypes.ErrTooManyRequests.Error():
// hitting the recovering member.
case raft.ErrProposalDropped.Error():
// removed member, or leadership has changed (old leader got raftpb.MsgProp)
case context.Canceled.Error():
// from stresser.Cancel method:
return
@ -163,6 +166,7 @@ func (s *keyStresser) run() {
zap.String("stress-type", s.stype.String()),
zap.String("endpoint", s.m.EtcdClientEndpoint),
zap.String("error-type", reflect.TypeOf(err).String()),
zap.String("error-desc", rpctypes.ErrorDesc(err)),
zap.Error(err),
)
return

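The stresser now treats raft.ErrProposalDropped as an expected, retriable error alongside the existing cases, matching the server-side change that surfaces dropped proposals. A reduced sketch of the classification; the helper name is hypothetical, and the real switch lives inline in keyStresser.run, comparing error descriptions as strings:

package main

import (
	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
	"github.com/coreos/etcd/raft"
)

// isRetriableStressError reports whether a stress-write error is expected
// under fault injection and can be retried rather than failing the test.
func isRetriableStressError(err error) bool {
	switch rpctypes.ErrorDesc(err) {
	case raft.ErrProposalDropped.Error():
		// member was removed, or leadership changed mid-proposal
		return true
	case rpctypes.ErrTooManyRequests.Error():
		// hitting a recovering member
		return true
	}
	return false
}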
View File

@ -28,8 +28,6 @@ import (
"github.com/coreos/etcd/client"
"github.com/coreos/etcd/etcdserver"
"github.com/coreos/etcd/pkg/testutil"
"github.com/coreos/pkg/capnslog"
)
func init() {
@ -456,9 +454,6 @@ func TestRejectUnhealthyRemove(t *testing.T) {
func TestRestartRemoved(t *testing.T) {
defer testutil.AfterTest(t)
capnslog.SetGlobalLogLevel(capnslog.INFO)
defer capnslog.SetGlobalLogLevel(defaultLogLevel)
// 1. start single-member cluster
c := NewCluster(t, 1)
for _, m := range c.Members {

View File

@ -52,6 +52,10 @@ func TestEmbedEtcd(t *testing.T) {
// setup defaults
for i := range tests {
tests[i].cfg = *embed.NewConfig()
tests[i].cfg.Logger = "zap"
tests[i].cfg.LogOutput = "discard"
tests[i].cfg.Debug = false
}
tests[0].cfg.Durl = "abc"
@ -175,6 +179,10 @@ func newEmbedURLs(secure bool, n int) (urls []url.URL) {
}
func setupEmbedCfg(cfg *embed.Config, curls []url.URL, purls []url.URL) {
cfg.Logger = "zap"
cfg.LogOutput = "discard"
cfg.Debug = false
cfg.ClusterState = "new"
cfg.LCUrls, cfg.ACUrls = curls, curls
cfg.LPUrls, cfg.APUrls = purls, purls

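With Logger, LogOutput, and Debug wired through embed.Config, the same settings the tests use apply to any embedder. A minimal sketch of starting an in-process etcd with the new fields, assuming default client/peer ports are free and using a hypothetical data directory:

package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/embed"
)

func main() {
	cfg := embed.NewConfig()
	cfg.Dir = "/tmp/embed-etcd" // hypothetical data directory
	cfg.Logger = "zap"          // structured zap logger
	cfg.LogOutput = "discard"   // same value the tests above use
	cfg.Debug = false

	e, err := embed.StartEtcd(cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer e.Close()

	select {
	case <-e.Server.ReadyNotify():
		log.Println("embedded etcd is ready")
	case <-time.After(30 * time.Second):
		log.Println("embedded etcd took too long to start")
	}
}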
View File

@ -28,6 +28,7 @@ import (
func TestPauseMember(t *testing.T) {
defer testutil.AfterTest(t)
c := NewCluster(t, 5)
c.Launch(t)
defer c.Terminate(t)

View File

@ -27,6 +27,8 @@ import (
"github.com/coreos/etcd/mvcc"
"github.com/coreos/etcd/mvcc/backend"
"github.com/coreos/etcd/pkg/testutil"
"go.uber.org/zap"
)
// TestV3StorageQuotaApply tests the V3 server respects quotas during apply
@ -164,7 +166,7 @@ func TestV3CorruptAlarm(t *testing.T) {
clus.Members[0].Stop(t)
fp := filepath.Join(clus.Members[0].DataDir, "member", "snap", "db")
be := backend.NewDefaultBackend(fp)
s := mvcc.NewStore(be, nil, &fakeConsistentIndex{13})
s := mvcc.NewStore(zap.NewExample(), be, nil, &fakeConsistentIndex{13})
// NOTE: cluster_proxy mode with namespacing won't set 'k', but namespace/'k'.
s.Put([]byte("abc"), []byte("def"), 0)
s.Put([]byte("xyz"), []byte("123"), 0)

View File

@ -27,6 +27,8 @@ import (
bolt "github.com/coreos/bbolt"
"github.com/coreos/pkg/capnslog"
humanize "github.com/dustin/go-humanize"
"go.uber.org/zap"
)
var (
@ -97,6 +99,8 @@ type backend struct {
stopc chan struct{}
donec chan struct{}
lg *zap.Logger
}
type BackendConfig struct {
@ -108,6 +112,8 @@ type BackendConfig struct {
BatchLimit int
// MmapSize is the number of bytes to mmap for the backend.
MmapSize uint64
// Logger logs backend-side operations.
Logger *zap.Logger
}
func DefaultBackendConfig() BackendConfig {
@ -137,7 +143,11 @@ func newBackend(bcfg BackendConfig) *backend {
db, err := bolt.Open(bcfg.Path, 0600, bopts)
if err != nil {
plog.Panicf("cannot open database at %s (%v)", bcfg.Path, err)
if bcfg.Logger != nil {
bcfg.Logger.Panic("failed to open database", zap.String("path", bcfg.Path), zap.Error(err))
} else {
plog.Panicf("cannot open database at %s (%v)", bcfg.Path, err)
}
}
// In future, may want to make buffering optional for low-concurrency systems
@ -157,6 +167,8 @@ func newBackend(bcfg BackendConfig) *backend {
stopc: make(chan struct{}),
donec: make(chan struct{}),
lg: bcfg.Logger,
}
b.batchTx = newBatchTxBuffered(b)
go b.run()
@ -204,7 +216,16 @@ func (b *backend) Snapshot() Snapshot {
for {
select {
case <-ticker.C:
plog.Warningf("snapshotting is taking more than %v seconds to finish transferring %v MB [started at %v]", time.Since(start).Seconds(), float64(dbBytes)/float64(1024*1014), start)
if b.lg != nil {
b.lg.Warn(
"snapshotting taking too long to transfer",
zap.Duration("taking", time.Since(start)),
zap.Int64("bytes", dbBytes),
zap.String("size", humanize.Bytes(uint64(dbBytes))),
)
} else {
plog.Warningf("snapshotting is taking more than %v seconds to finish transferring %v MB [started at %v]", time.Since(start).Seconds(), float64(dbBytes)/float64(1024*1014), start)
}
case <-stopc:
snapshotDurations.Observe(time.Since(start).Seconds())
return
@ -294,6 +315,8 @@ func (b *backend) Defrag() error {
}
func (b *backend) defrag() error {
now := time.Now()
// TODO: make this non-blocking?
// lock batchTx to ensure nobody is using previous tx, and then
// close previous ongoing tx.
@ -317,37 +340,67 @@ func (b *backend) defrag() error {
return err
}
err = defragdb(b.db, tmpdb, defragLimit)
dbp := b.db.Path()
tdbp := tmpdb.Path()
size1, sizeInUse1 := b.Size(), b.SizeInUse()
if b.lg != nil {
b.lg.Info(
"defragmenting",
zap.String("path", dbp),
zap.Int64("current-db-size-bytes", size1),
zap.String("current-db-size", humanize.Bytes(uint64(size1))),
zap.Int64("current-db-size-in-use-bytes", sizeInUse1),
zap.String("current-db-size-in-use", humanize.Bytes(uint64(sizeInUse1))),
)
}
err = defragdb(b.db, tmpdb, defragLimit)
if err != nil {
tmpdb.Close()
os.RemoveAll(tmpdb.Path())
return err
}
dbp := b.db.Path()
tdbp := tmpdb.Path()
err = b.db.Close()
if err != nil {
plog.Fatalf("cannot close database (%s)", err)
if b.lg != nil {
b.lg.Fatal("failed to close database", zap.Error(err))
} else {
plog.Fatalf("cannot close database (%s)", err)
}
}
err = tmpdb.Close()
if err != nil {
plog.Fatalf("cannot close database (%s)", err)
if b.lg != nil {
b.lg.Fatal("failed to close tmp database", zap.Error(err))
} else {
plog.Fatalf("cannot close database (%s)", err)
}
}
err = os.Rename(tdbp, dbp)
if err != nil {
plog.Fatalf("cannot rename database (%s)", err)
if b.lg != nil {
b.lg.Fatal("failed to rename tmp database", zap.Error(err))
} else {
plog.Fatalf("cannot rename database (%s)", err)
}
}
b.db, err = bolt.Open(dbp, 0600, boltOpenOptions)
if err != nil {
plog.Panicf("cannot open database at %s (%v)", dbp, err)
if b.lg != nil {
b.lg.Fatal("failed to open database", zap.String("path", dbp), zap.Error(err))
} else {
plog.Panicf("cannot open database at %s (%v)", dbp, err)
}
}
b.batchTx.tx, err = b.db.Begin(true)
if err != nil {
plog.Fatalf("cannot begin tx (%s)", err)
if b.lg != nil {
b.lg.Fatal("failed to begin tx", zap.Error(err))
} else {
plog.Fatalf("cannot begin tx (%s)", err)
}
}
b.readTx.reset()
@ -358,6 +411,20 @@ func (b *backend) defrag() error {
atomic.StoreInt64(&b.size, size)
atomic.StoreInt64(&b.sizeInUse, size-(int64(db.Stats().FreePageN)*int64(db.Info().PageSize)))
size2, sizeInUse2 := b.Size(), b.SizeInUse()
if b.lg != nil {
b.lg.Info(
"defragmented",
zap.String("path", dbp),
zap.Int64("current-db-size-bytes-diff", size2-size1),
zap.Int64("current-db-size-bytes", size2),
zap.String("current-db-size", humanize.Bytes(uint64(size2))),
zap.Int64("current-db-size-in-use-bytes-diff", sizeInUse2-sizeInUse1),
zap.Int64("current-db-size-in-use-bytes", sizeInUse2),
zap.String("current-db-size-in-use", humanize.Bytes(uint64(sizeInUse2))),
zap.Duration("took", time.Since(now)),
)
}
return nil
}
@ -429,7 +496,11 @@ func (b *backend) begin(write bool) *bolt.Tx {
func (b *backend) unsafeBegin(write bool) *bolt.Tx {
tx, err := b.db.Begin(write)
if err != nil {
plog.Fatalf("cannot begin tx (%s)", err)
if b.lg != nil {
b.lg.Fatal("failed to begin tx", zap.Error(err))
} else {
plog.Fatalf("cannot begin tx (%s)", err)
}
}
return tx
}
@ -438,7 +509,7 @@ func (b *backend) unsafeBegin(write bool) *bolt.Tx {
func NewTmpBackend(batchInterval time.Duration, batchLimit int) (*backend, string) {
dir, err := ioutil.TempDir(os.TempDir(), "etcd_backend_test")
if err != nil {
plog.Fatal(err)
panic(err)
}
tmpPath := filepath.Join(dir, "database")
bcfg := DefaultBackendConfig()

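Every converted call site in this file follows the same nil-guarded shape: log through the injected *zap.Logger when BackendConfig supplied one, otherwise fall back to the legacy package-level logger. The pattern extracted into a hypothetical helper; the change itself inlines the if/else at each site, with plog as the fallback (the standard library logger stands in for it here):

package backendpattern

import (
	"log"

	"go.uber.org/zap"
)

// fatalErr logs a fatal error through the structured logger when one is
// available, and through the legacy logger otherwise.
func fatalErr(lg *zap.Logger, msg string, err error) {
	if lg != nil {
		lg.Fatal(msg, zap.Error(err))
	} else {
		log.Fatalf("%s: %v", msg, err)
	}
}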
View File

@ -19,6 +19,7 @@ import (
"sync"
"github.com/google/btree"
"go.uber.org/zap"
)
type index interface {
@ -39,11 +40,13 @@ type index interface {
type treeIndex struct {
sync.RWMutex
tree *btree.BTree
lg *zap.Logger
}
func newTreeIndex() index {
func newTreeIndex(lg *zap.Logger) index {
return &treeIndex{
tree: btree.New(32),
lg: lg,
}
}
@ -183,7 +186,11 @@ func (ti *treeIndex) RangeSince(key, end []byte, rev int64) []revision {
func (ti *treeIndex) Compact(rev int64) map[revision]struct{} {
available := make(map[revision]struct{})
var emptyki []*keyIndex
plog.Printf("store.index: compact %d", rev)
if ti.lg != nil {
ti.lg.Info("compact tree index", zap.Int64("revision", rev))
} else {
plog.Printf("store.index: compact %d", rev)
}
// TODO: do not hold the lock for long time?
// This is probably OK. Compacting 10M keys takes O(10ms).
ti.Lock()
@ -192,7 +199,11 @@ func (ti *treeIndex) Compact(rev int64) map[revision]struct{} {
for _, ki := range emptyki {
item := ti.tree.Delete(ki)
if item == nil {
plog.Panic("store.index: unexpected delete failure during compaction")
if ti.lg != nil {
ti.lg.Panic("failed to delete during compaction")
} else {
plog.Panic("store.index: unexpected delete failure during compaction")
}
}
}
return available

View File

@ -19,10 +19,11 @@ import (
"testing"
"github.com/google/btree"
"go.uber.org/zap"
)
func TestIndexGet(t *testing.T) {
ti := newTreeIndex()
ti := newTreeIndex(zap.NewExample())
ti.Put([]byte("foo"), revision{main: 2})
ti.Put([]byte("foo"), revision{main: 4})
ti.Tombstone([]byte("foo"), revision{main: 6})
@ -64,7 +65,7 @@ func TestIndexRange(t *testing.T) {
allKeys := [][]byte{[]byte("foo"), []byte("foo1"), []byte("foo2")}
allRevs := []revision{{main: 1}, {main: 2}, {main: 3}}
ti := newTreeIndex()
ti := newTreeIndex(zap.NewExample())
for i := range allKeys {
ti.Put(allKeys[i], allRevs[i])
}
@ -120,7 +121,7 @@ func TestIndexRange(t *testing.T) {
}
func TestIndexTombstone(t *testing.T) {
ti := newTreeIndex()
ti := newTreeIndex(zap.NewExample())
ti.Put([]byte("foo"), revision{main: 1})
err := ti.Tombstone([]byte("foo"), revision{main: 2})
@ -142,7 +143,7 @@ func TestIndexRangeSince(t *testing.T) {
allKeys := [][]byte{[]byte("foo"), []byte("foo1"), []byte("foo2"), []byte("foo2"), []byte("foo1"), []byte("foo")}
allRevs := []revision{{main: 1}, {main: 2}, {main: 3}, {main: 4}, {main: 5}, {main: 6}}
ti := newTreeIndex()
ti := newTreeIndex(zap.NewExample())
for i := range allKeys {
ti.Put(allKeys[i], allRevs[i])
}
@ -216,7 +217,7 @@ func TestIndexCompactAndKeep(t *testing.T) {
}
// Continuous Compact and Keep
ti := newTreeIndex()
ti := newTreeIndex(zap.NewExample())
for _, tt := range tests {
if tt.remove {
ti.Tombstone(tt.key, tt.rev)
@ -247,7 +248,7 @@ func TestIndexCompactAndKeep(t *testing.T) {
// Once Compact and Keep
for i := int64(1); i < maxRev; i++ {
ti := newTreeIndex()
ti := newTreeIndex(zap.NewExample())
for _, tt := range tests {
if tt.remove {
ti.Tombstone(tt.key, tt.rev)

View File

@ -28,6 +28,7 @@ import (
"github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go"
"go.uber.org/zap"
)
// Functional tests for features implemented in v3 store. It treats v3 store
@ -75,7 +76,7 @@ func TestKVTxnRange(t *testing.T) { testKVRange(t, txnRangeFunc) }
func testKVRange(t *testing.T, f rangeFunc) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := NewStore(b, &lease.FakeLessor{}, nil)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
defer cleanup(s, b, tmpPath)
kvs := put3TestKVs(s)
@ -141,7 +142,7 @@ func TestKVTxnRangeRev(t *testing.T) { testKVRangeRev(t, txnRangeFunc) }
func testKVRangeRev(t *testing.T, f rangeFunc) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := NewStore(b, &lease.FakeLessor{}, nil)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
defer cleanup(s, b, tmpPath)
kvs := put3TestKVs(s)
@ -177,7 +178,7 @@ func TestKVTxnRangeBadRev(t *testing.T) { testKVRangeBadRev(t, txnRangeFunc) }
func testKVRangeBadRev(t *testing.T, f rangeFunc) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := NewStore(b, &lease.FakeLessor{}, nil)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
defer cleanup(s, b, tmpPath)
put3TestKVs(s)
@ -210,7 +211,7 @@ func TestKVTxnRangeLimit(t *testing.T) { testKVRangeLimit(t, txnRangeFunc) }
func testKVRangeLimit(t *testing.T, f rangeFunc) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := NewStore(b, &lease.FakeLessor{}, nil)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
defer cleanup(s, b, tmpPath)
kvs := put3TestKVs(s)
@ -251,7 +252,7 @@ func TestKVTxnPutMultipleTimes(t *testing.T) { testKVPutMultipleTimes(t, txnPutF
func testKVPutMultipleTimes(t *testing.T, f putFunc) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := NewStore(b, &lease.FakeLessor{}, nil)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
defer cleanup(s, b, tmpPath)
for i := 0; i < 10; i++ {
@ -313,7 +314,7 @@ func testKVDeleteRange(t *testing.T, f deleteRangeFunc) {
for i, tt := range tests {
b, tmpPath := backend.NewDefaultTmpBackend()
s := NewStore(b, &lease.FakeLessor{}, nil)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
s.Put([]byte("foo"), []byte("bar"), lease.NoLease)
s.Put([]byte("foo1"), []byte("bar1"), lease.NoLease)
@ -333,7 +334,7 @@ func TestKVTxnDeleteMultipleTimes(t *testing.T) { testKVDeleteMultipleTimes(t, t
func testKVDeleteMultipleTimes(t *testing.T, f deleteRangeFunc) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := NewStore(b, &lease.FakeLessor{}, nil)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
defer cleanup(s, b, tmpPath)
s.Put([]byte("foo"), []byte("bar"), lease.NoLease)
@ -354,7 +355,7 @@ func testKVDeleteMultipleTimes(t *testing.T, f deleteRangeFunc) {
// test that range, put, delete on single key in sequence repeatedly works correctly.
func TestKVOperationInSequence(t *testing.T) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := NewStore(b, &lease.FakeLessor{}, nil)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
defer cleanup(s, b, tmpPath)
for i := 0; i < 10; i++ {
@ -401,7 +402,7 @@ func TestKVOperationInSequence(t *testing.T) {
func TestKVTxnBlockWriteOperations(t *testing.T) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := NewStore(b, &lease.FakeLessor{}, nil)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
tests := []func(){
func() { s.Put([]byte("foo"), nil, lease.NoLease) },
@ -434,7 +435,7 @@ func TestKVTxnBlockWriteOperations(t *testing.T) {
func TestKVTxnNonBlockRange(t *testing.T) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := NewStore(b, &lease.FakeLessor{}, nil)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
defer cleanup(s, b, tmpPath)
txn := s.Write()
@ -455,7 +456,7 @@ func TestKVTxnNonBlockRange(t *testing.T) {
// test that txn range, put, delete on single key in sequence repeatedly works correctly.
func TestKVTxnOperationInSequence(t *testing.T) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := NewStore(b, &lease.FakeLessor{}, nil)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
defer cleanup(s, b, tmpPath)
for i := 0; i < 10; i++ {
@ -505,7 +506,7 @@ func TestKVTxnOperationInSequence(t *testing.T) {
func TestKVCompactReserveLastValue(t *testing.T) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := NewStore(b, &lease.FakeLessor{}, nil)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
defer cleanup(s, b, tmpPath)
s.Put([]byte("foo"), []byte("bar0"), 1)
@ -559,7 +560,7 @@ func TestKVCompactReserveLastValue(t *testing.T) {
func TestKVCompactBad(t *testing.T) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := NewStore(b, &lease.FakeLessor{}, nil)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
defer cleanup(s, b, tmpPath)
s.Put([]byte("foo"), []byte("bar0"), lease.NoLease)
@ -592,7 +593,7 @@ func TestKVHash(t *testing.T) {
for i := 0; i < len(hashes); i++ {
var err error
b, tmpPath := backend.NewDefaultTmpBackend()
kv := NewStore(b, &lease.FakeLessor{}, nil)
kv := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
kv.Put([]byte("foo0"), []byte("bar0"), lease.NoLease)
kv.Put([]byte("foo1"), []byte("bar0"), lease.NoLease)
hashes[i], _, err = kv.Hash()
@ -630,7 +631,7 @@ func TestKVRestore(t *testing.T) {
}
for i, tt := range tests {
b, tmpPath := backend.NewDefaultTmpBackend()
s := NewStore(b, &lease.FakeLessor{}, nil)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
tt(s)
var kvss [][]mvccpb.KeyValue
for k := int64(0); k < 10; k++ {
@ -642,7 +643,7 @@ func TestKVRestore(t *testing.T) {
s.Close()
// ns should recover the previous state from the backend.
ns := NewStore(b, &lease.FakeLessor{}, nil)
ns := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
if keysRestore := readGaugeInt(&keysGauge); keysBefore != keysRestore {
t.Errorf("#%d: got %d key count, expected %d", i, keysRestore, keysBefore)
@ -674,7 +675,7 @@ func readGaugeInt(g *prometheus.Gauge) int {
func TestKVSnapshot(t *testing.T) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := NewStore(b, &lease.FakeLessor{}, nil)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
defer cleanup(s, b, tmpPath)
wkvs := put3TestKVs(s)
@ -694,7 +695,7 @@ func TestKVSnapshot(t *testing.T) {
}
f.Close()
ns := NewStore(b, &lease.FakeLessor{}, nil)
ns := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
defer ns.Close()
r, err := ns.Range([]byte("a"), []byte("z"), RangeOptions{})
if err != nil {
@ -710,7 +711,7 @@ func TestKVSnapshot(t *testing.T) {
func TestWatchableKVWatch(t *testing.T) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := WatchableKV(newWatchableStore(b, &lease.FakeLessor{}, nil))
s := WatchableKV(newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil))
defer cleanup(s, b, tmpPath)
w := s.NewWatchStream()

View File

@ -30,6 +30,7 @@ import (
"github.com/coreos/etcd/pkg/schedule"
"github.com/coreos/pkg/capnslog"
"go.uber.org/zap"
)
var (
@ -100,15 +101,17 @@ type store struct {
fifoSched schedule.Scheduler
stopc chan struct{}
lg *zap.Logger
}
// NewStore returns a new store. It is useful to create a store inside
// mvcc pkg. It should only be used for testing externally.
func NewStore(b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter) *store {
func NewStore(lg *zap.Logger, b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter) *store {
s := &store{
b: b,
ig: ig,
kvindex: newTreeIndex(),
kvindex: newTreeIndex(lg),
le: le,
@ -119,6 +122,8 @@ func NewStore(b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter) *sto
fifoSched: schedule.NewFIFOScheduler(),
stopc: make(chan struct{}),
lg: lg,
}
s.ReadView = &readView{s}
s.WriteView = &writeView{s}
@ -291,7 +296,7 @@ func (s *store) Restore(b backend.Backend) error {
atomic.StoreUint64(&s.consistentIndex, 0)
s.b = b
s.kvindex = newTreeIndex()
s.kvindex = newTreeIndex(s.lg)
s.currentRev = 1
s.compactMainRev = -1
s.fifoSched = schedule.NewFIFOScheduler()

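Callers now pass the logger as the leading argument to NewStore, and it is threaded into the tree index on both construction and restore. A minimal usage sketch with a throwaway backend, following the zap.NewExample() convention the updated tests use:

package main

import (
	"os"

	"github.com/coreos/etcd/lease"
	"github.com/coreos/etcd/mvcc"
	"github.com/coreos/etcd/mvcc/backend"

	"go.uber.org/zap"
)

func main() {
	be, tmpPath := backend.NewDefaultTmpBackend()
	defer os.Remove(tmpPath)

	// Logger first; a real server would inject its own *zap.Logger.
	s := mvcc.NewStore(zap.NewExample(), be, &lease.FakeLessor{}, nil)
	defer s.Close()

	s.Put([]byte("foo"), []byte("bar"), lease.NoLease)
}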
View File

@ -20,6 +20,8 @@ import (
"github.com/coreos/etcd/lease"
"github.com/coreos/etcd/mvcc/backend"
"go.uber.org/zap"
)
type fakeConsistentIndex uint64
@ -31,7 +33,7 @@ func (i *fakeConsistentIndex) ConsistentIndex() uint64 {
func BenchmarkStorePut(b *testing.B) {
var i fakeConsistentIndex
be, tmpPath := backend.NewDefaultTmpBackend()
s := NewStore(be, &lease.FakeLessor{}, &i)
s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, &i)
defer cleanup(s, be, tmpPath)
// arbitrary number of bytes
@ -51,7 +53,7 @@ func BenchmarkStoreRangeKey100(b *testing.B) { benchmarkStoreRange(b, 100) }
func benchmarkStoreRange(b *testing.B, n int) {
var i fakeConsistentIndex
be, tmpPath := backend.NewDefaultTmpBackend()
s := NewStore(be, &lease.FakeLessor{}, &i)
s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, &i)
defer cleanup(s, be, tmpPath)
// 64 byte key/val
@ -79,7 +81,7 @@ func benchmarkStoreRange(b *testing.B, n int) {
func BenchmarkConsistentIndex(b *testing.B) {
fci := fakeConsistentIndex(10)
be, tmpPath := backend.NewDefaultTmpBackend()
s := NewStore(be, &lease.FakeLessor{}, &fci)
s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, &fci)
defer cleanup(s, be, tmpPath)
tx := s.b.BatchTx()
@ -98,7 +100,7 @@ func BenchmarkConsistentIndex(b *testing.B) {
func BenchmarkStorePutUpdate(b *testing.B) {
var i fakeConsistentIndex
be, tmpPath := backend.NewDefaultTmpBackend()
s := NewStore(be, &lease.FakeLessor{}, &i)
s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, &i)
defer cleanup(s, be, tmpPath)
// arbitrary number of bytes
@ -117,7 +119,7 @@ func BenchmarkStorePutUpdate(b *testing.B) {
func BenchmarkStoreTxnPut(b *testing.B) {
var i fakeConsistentIndex
be, tmpPath := backend.NewDefaultTmpBackend()
s := NewStore(be, &lease.FakeLessor{}, &i)
s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, &i)
defer cleanup(s, be, tmpPath)
// arbitrary number of bytes
@ -138,7 +140,7 @@ func BenchmarkStoreTxnPut(b *testing.B) {
func benchmarkStoreRestore(revsPerKey int, b *testing.B) {
var i fakeConsistentIndex
be, tmpPath := backend.NewDefaultTmpBackend()
s := NewStore(be, &lease.FakeLessor{}, &i)
s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, &i)
// use closure to capture 's' to pick up the reassignment
defer func() { cleanup(s, be, tmpPath) }()
@ -158,7 +160,7 @@ func benchmarkStoreRestore(revsPerKey int, b *testing.B) {
b.ReportAllocs()
b.ResetTimer()
s = NewStore(be, &lease.FakeLessor{}, &i)
s = NewStore(zap.NewExample(), be, &lease.FakeLessor{}, &i)
}
func BenchmarkStoreRestoreRevs1(b *testing.B) {

View File

@ -17,6 +17,8 @@ package mvcc
import (
"encoding/binary"
"time"
"go.uber.org/zap"
)
func (s *store) scheduleCompaction(compactMainRev int64, keep map[revision]struct{}) bool {
@ -51,7 +53,15 @@ func (s *store) scheduleCompaction(compactMainRev int64, keep map[revision]struc
revToBytes(revision{main: compactMainRev}, rbytes)
tx.UnsafePut(metaBucketName, finishedCompactKeyName, rbytes)
tx.Unlock()
plog.Printf("finished scheduled compaction at %d (took %v)", compactMainRev, time.Since(totalStart))
if s.lg != nil {
s.lg.Info(
"finished scheduled compaction",
zap.Int64("compact-revision", compactMainRev),
zap.Duration("took", time.Since(totalStart)),
)
} else {
plog.Printf("finished scheduled compaction at %d (took %v)", compactMainRev, time.Since(totalStart))
}
return true
}

View File

@ -22,6 +22,7 @@ import (
"github.com/coreos/etcd/lease"
"github.com/coreos/etcd/mvcc/backend"
"go.uber.org/zap"
)
func TestScheduleCompaction(t *testing.T) {
@ -64,7 +65,7 @@ func TestScheduleCompaction(t *testing.T) {
}
for i, tt := range tests {
b, tmpPath := backend.NewDefaultTmpBackend()
s := NewStore(b, &lease.FakeLessor{}, nil)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
tx := s.b.BatchTx()
tx.Lock()
@ -98,7 +99,7 @@ func TestScheduleCompaction(t *testing.T) {
func TestCompactAllAndRestore(t *testing.T) {
b, tmpPath := backend.NewDefaultTmpBackend()
s0 := NewStore(b, &lease.FakeLessor{}, nil)
s0 := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
defer os.Remove(tmpPath)
s0.Put([]byte("foo"), []byte("bar"), lease.NoLease)
@ -124,7 +125,7 @@ func TestCompactAllAndRestore(t *testing.T) {
t.Fatal(err)
}
s1 := NewStore(b, &lease.FakeLessor{}, nil)
s1 := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
if s1.Rev() != rev {
t.Errorf("rev = %v, want %v", s1.Rev(), rev)
}

View File

@ -31,11 +31,12 @@ import (
"github.com/coreos/etcd/mvcc/mvccpb"
"github.com/coreos/etcd/pkg/schedule"
"github.com/coreos/etcd/pkg/testutil"
"go.uber.org/zap"
)
func TestStoreRev(t *testing.T) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := NewStore(b, &lease.FakeLessor{}, nil)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
defer s.Close()
defer os.Remove(tmpPath)
@ -419,7 +420,7 @@ func TestRestoreDelete(t *testing.T) {
defer func() { restoreChunkKeys = oldChunk }()
b, tmpPath := backend.NewDefaultTmpBackend()
s := NewStore(b, &lease.FakeLessor{}, nil)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
defer os.Remove(tmpPath)
keys := make(map[string]struct{})
@ -445,7 +446,7 @@ func TestRestoreDelete(t *testing.T) {
}
s.Close()
s = NewStore(b, &lease.FakeLessor{}, nil)
s = NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
defer s.Close()
for i := 0; i < 20; i++ {
ks := fmt.Sprintf("foo-%d", i)
@ -465,7 +466,7 @@ func TestRestoreDelete(t *testing.T) {
func TestRestoreContinueUnfinishedCompaction(t *testing.T) {
b, tmpPath := backend.NewDefaultTmpBackend()
s0 := NewStore(b, &lease.FakeLessor{}, nil)
s0 := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
defer os.Remove(tmpPath)
s0.Put([]byte("foo"), []byte("bar"), lease.NoLease)
@ -482,7 +483,7 @@ func TestRestoreContinueUnfinishedCompaction(t *testing.T) {
s0.Close()
s1 := NewStore(b, &lease.FakeLessor{}, nil)
s1 := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
// wait for scheduled compaction to be finished
time.Sleep(100 * time.Millisecond)
@ -519,7 +520,7 @@ type hashKVResult struct {
// TestHashKVWhenCompacting ensures that HashKV returns correct hash when compacting.
func TestHashKVWhenCompacting(t *testing.T) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := NewStore(b, &lease.FakeLessor{}, nil)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
defer os.Remove(tmpPath)
rev := 10000
@ -587,7 +588,7 @@ func TestHashKVWhenCompacting(t *testing.T) {
// correct hash value with latest revision.
func TestHashKVZeroRevision(t *testing.T) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := NewStore(b, &lease.FakeLessor{}, nil)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
defer os.Remove(tmpPath)
rev := 1000
@ -620,7 +621,7 @@ func TestTxnPut(t *testing.T) {
vals := createBytesSlice(bytesN, sliceN)
b, tmpPath := backend.NewDefaultTmpBackend()
s := NewStore(b, &lease.FakeLessor{}, nil)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
defer cleanup(s, b, tmpPath)
for i := 0; i < sliceN; i++ {
@ -635,7 +636,7 @@ func TestTxnPut(t *testing.T) {
func TestTxnBlockBackendForceCommit(t *testing.T) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := NewStore(b, &lease.FakeLessor{}, nil)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
defer os.Remove(tmpPath)
txn := s.Read()

View File

@ -21,6 +21,7 @@ import (
"github.com/coreos/etcd/lease"
"github.com/coreos/etcd/mvcc/backend"
"github.com/coreos/etcd/mvcc/mvccpb"
"go.uber.org/zap"
)
// non-const so modifiable by tests
@ -67,13 +68,13 @@ type watchableStore struct {
// cancel operations.
type cancelFunc func()
func New(b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter) ConsistentWatchableKV {
return newWatchableStore(b, le, ig)
func New(lg *zap.Logger, b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter) ConsistentWatchableKV {
return newWatchableStore(lg, b, le, ig)
}
func newWatchableStore(b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter) *watchableStore {
func newWatchableStore(lg *zap.Logger, b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter) *watchableStore {
s := &watchableStore{
store: NewStore(b, le, ig),
store: NewStore(lg, b, le, ig),
victimc: make(chan struct{}, 1),
unsynced: newWatcherGroup(),
synced: newWatcherGroup(),

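The watchable-store constructors gain the same leading logger parameter and forward it to NewStore. A short sketch of the updated mvcc.New, exercising the store through the KV interface it returns; RangeOptions usage follows the tests elsewhere in this change:

package main

import (
	"fmt"
	"os"

	"github.com/coreos/etcd/lease"
	"github.com/coreos/etcd/mvcc"
	"github.com/coreos/etcd/mvcc/backend"

	"go.uber.org/zap"
)

func main() {
	be, tmpPath := backend.NewDefaultTmpBackend()
	defer os.Remove(tmpPath)

	s := mvcc.New(zap.NewExample(), be, &lease.FakeLessor{}, nil)
	defer s.Close()

	s.Put([]byte("foo"), []byte("bar"), lease.NoLease)
	r, err := s.Range([]byte("a"), []byte("z"), mvcc.RangeOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("kvs:", len(r.KVs))
}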
View File

@ -21,11 +21,13 @@ import (
"github.com/coreos/etcd/lease"
"github.com/coreos/etcd/mvcc/backend"
"go.uber.org/zap"
)
func BenchmarkWatchableStorePut(b *testing.B) {
be, tmpPath := backend.NewDefaultTmpBackend()
s := New(be, &lease.FakeLessor{}, nil)
s := New(zap.NewExample(), be, &lease.FakeLessor{}, nil)
defer cleanup(s, be, tmpPath)
// arbitrary number of bytes
@ -46,7 +48,7 @@ func BenchmarkWatchableStorePut(b *testing.B) {
func BenchmarkWatchableStoreTxnPut(b *testing.B) {
var i fakeConsistentIndex
be, tmpPath := backend.NewDefaultTmpBackend()
s := New(be, &lease.FakeLessor{}, &i)
s := New(zap.NewExample(), be, &lease.FakeLessor{}, &i)
defer cleanup(s, be, tmpPath)
// arbitrary number of bytes
@ -67,7 +69,7 @@ func BenchmarkWatchableStoreTxnPut(b *testing.B) {
// many synced watchers receiving a Put notification.
func BenchmarkWatchableStoreWatchSyncPut(b *testing.B) {
be, tmpPath := backend.NewDefaultTmpBackend()
s := newWatchableStore(be, &lease.FakeLessor{}, nil)
s := newWatchableStore(zap.NewExample(), be, &lease.FakeLessor{}, nil)
defer cleanup(s, be, tmpPath)
k := []byte("testkey")
@ -105,7 +107,7 @@ func BenchmarkWatchableStoreWatchSyncPut(b *testing.B) {
// we should put to simulate the real-world use cases.
func BenchmarkWatchableStoreUnsyncedCancel(b *testing.B) {
be, tmpPath := backend.NewDefaultTmpBackend()
s := NewStore(be, &lease.FakeLessor{}, nil)
s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, nil)
// manually create watchableStore instead of newWatchableStore
// because newWatchableStore periodically calls syncWatchersLoop
@ -162,7 +164,7 @@ func BenchmarkWatchableStoreUnsyncedCancel(b *testing.B) {
func BenchmarkWatchableStoreSyncedCancel(b *testing.B) {
be, tmpPath := backend.NewDefaultTmpBackend()
s := newWatchableStore(be, &lease.FakeLessor{}, nil)
s := newWatchableStore(zap.NewExample(), be, &lease.FakeLessor{}, nil)
defer func() {
s.store.Close()

View File

@ -26,11 +26,12 @@ import (
"github.com/coreos/etcd/lease"
"github.com/coreos/etcd/mvcc/backend"
"github.com/coreos/etcd/mvcc/mvccpb"
"go.uber.org/zap"
)
func TestWatch(t *testing.T) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := newWatchableStore(b, &lease.FakeLessor{}, nil)
s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
defer func() {
s.store.Close()
@ -52,7 +53,7 @@ func TestWatch(t *testing.T) {
func TestNewWatcherCancel(t *testing.T) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := newWatchableStore(b, &lease.FakeLessor{}, nil)
s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
defer func() {
s.store.Close()
@ -84,7 +85,7 @@ func TestCancelUnsynced(t *testing.T) {
// method to sync watchers in unsynced map. We want to keep watchers
// in unsynced to test if syncWatchers works as expected.
s := &watchableStore{
store: NewStore(b, &lease.FakeLessor{}, nil),
store: NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil),
unsynced: newWatcherGroup(),
// to make the test not crash from assigning to nil map.
@ -139,7 +140,7 @@ func TestSyncWatchers(t *testing.T) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := &watchableStore{
store: NewStore(b, &lease.FakeLessor{}, nil),
store: NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil),
unsynced: newWatcherGroup(),
synced: newWatcherGroup(),
}
@ -222,7 +223,7 @@ func TestSyncWatchers(t *testing.T) {
// TestWatchCompacted tests a watcher that watches on a compacted revision.
func TestWatchCompacted(t *testing.T) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := newWatchableStore(b, &lease.FakeLessor{}, nil)
s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
defer func() {
s.store.Close()
@ -259,7 +260,7 @@ func TestWatchCompacted(t *testing.T) {
func TestWatchFutureRev(t *testing.T) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := newWatchableStore(b, &lease.FakeLessor{}, nil)
s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
defer func() {
s.store.Close()
@ -300,7 +301,7 @@ func TestWatchRestore(t *testing.T) {
test := func(delay time.Duration) func(t *testing.T) {
return func(t *testing.T) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := newWatchableStore(b, &lease.FakeLessor{}, nil)
s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
defer cleanup(s, b, tmpPath)
testKey := []byte("foo")
@ -308,7 +309,7 @@ func TestWatchRestore(t *testing.T) {
rev := s.Put(testKey, testValue, lease.NoLease)
newBackend, newPath := backend.NewDefaultTmpBackend()
newStore := newWatchableStore(newBackend, &lease.FakeLessor{}, nil)
newStore := newWatchableStore(zap.NewExample(), newBackend, &lease.FakeLessor{}, nil)
defer cleanup(newStore, newBackend, newPath)
w := newStore.NewWatchStream()
@ -341,7 +342,7 @@ func TestWatchRestore(t *testing.T) {
// TestWatchBatchUnsynced tests batching on unsynced watchers
func TestWatchBatchUnsynced(t *testing.T) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := newWatchableStore(b, &lease.FakeLessor{}, nil)
s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
oldMaxRevs := watchBatchMaxRevs
defer func() {
@ -475,7 +476,7 @@ func TestWatchVictims(t *testing.T) {
oldChanBufLen, oldMaxWatchersPerSync := chanBufLen, maxWatchersPerSync
b, tmpPath := backend.NewDefaultTmpBackend()
s := newWatchableStore(b, &lease.FakeLessor{}, nil)
s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
defer func() {
s.store.Close()
@ -553,7 +554,7 @@ func TestWatchVictims(t *testing.T) {
// canceling its watches.
func TestStressWatchCancelClose(t *testing.T) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := newWatchableStore(b, &lease.FakeLessor{}, nil)
s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
defer func() {
s.store.Close()

View File

@ -20,11 +20,13 @@ import (
"github.com/coreos/etcd/lease"
"github.com/coreos/etcd/mvcc/backend"
"go.uber.org/zap"
)
func BenchmarkKVWatcherMemoryUsage(b *testing.B) {
be, tmpPath := backend.NewDefaultTmpBackend()
watchable := newWatchableStore(be, &lease.FakeLessor{}, nil)
watchable := newWatchableStore(zap.NewExample(), be, &lease.FakeLessor{}, nil)
defer cleanup(watchable, be, tmpPath)

View File

@ -25,13 +25,14 @@ import (
"github.com/coreos/etcd/lease"
"github.com/coreos/etcd/mvcc/backend"
"github.com/coreos/etcd/mvcc/mvccpb"
"go.uber.org/zap"
)
// TestWatcherWatchID tests that each watcher provides unique watchID,
// and the watched event attaches the correct watchID.
func TestWatcherWatchID(t *testing.T) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := WatchableKV(newWatchableStore(b, &lease.FakeLessor{}, nil))
s := WatchableKV(newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil))
defer cleanup(s, b, tmpPath)
w := s.NewWatchStream()
@ -81,7 +82,7 @@ func TestWatcherWatchID(t *testing.T) {
func TestWatcherRequestsCustomID(t *testing.T) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := WatchableKV(newWatchableStore(b, &lease.FakeLessor{}, nil))
s := WatchableKV(newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil))
defer cleanup(s, b, tmpPath)
w := s.NewWatchStream()
@ -118,7 +119,7 @@ func TestWatcherRequestsCustomID(t *testing.T) {
// and returns events with matching prefixes.
func TestWatcherWatchPrefix(t *testing.T) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := WatchableKV(newWatchableStore(b, &lease.FakeLessor{}, nil))
s := WatchableKV(newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil))
defer cleanup(s, b, tmpPath)
w := s.NewWatchStream()
@ -192,7 +193,7 @@ func TestWatcherWatchPrefix(t *testing.T) {
// does not create watcher, which panics when canceling in range tree.
func TestWatcherWatchWrongRange(t *testing.T) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := WatchableKV(newWatchableStore(b, &lease.FakeLessor{}, nil))
s := WatchableKV(newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil))
defer cleanup(s, b, tmpPath)
w := s.NewWatchStream()
@ -212,7 +213,7 @@ func TestWatcherWatchWrongRange(t *testing.T) {
func TestWatchDeleteRange(t *testing.T) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := newWatchableStore(b, &lease.FakeLessor{}, nil)
s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil)
defer func() {
s.store.Close()
@ -251,7 +252,7 @@ func TestWatchDeleteRange(t *testing.T) {
// with given id inside watchStream.
func TestWatchStreamCancelWatcherByID(t *testing.T) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := WatchableKV(newWatchableStore(b, &lease.FakeLessor{}, nil))
s := WatchableKV(newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil))
defer cleanup(s, b, tmpPath)
w := s.NewWatchStream()
@ -294,7 +295,7 @@ func TestWatcherRequestProgress(t *testing.T) {
// method to sync watchers in unsynced map. We want to keep watchers
// in unsynced to test if syncWatchers works as expected.
s := &watchableStore{
store: NewStore(b, &lease.FakeLessor{}, nil),
store: NewStore(zap.NewExample(), b, &lease.FakeLessor{}, nil),
unsynced: newWatcherGroup(),
synced: newWatcherGroup(),
}
@ -343,7 +344,7 @@ func TestWatcherRequestProgress(t *testing.T) {
func TestWatcherWatchWithFilter(t *testing.T) {
b, tmpPath := backend.NewDefaultTmpBackend()
s := WatchableKV(newWatchableStore(b, &lease.FakeLessor{}, nil))
s := WatchableKV(newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, nil))
defer cleanup(s, b, tmpPath)
w := s.NewWatchStream()

View File

@ -20,14 +20,16 @@ import (
"sort"
"strings"
"time"
"go.uber.org/zap"
)
func PurgeFile(dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) <-chan error {
return purgeFile(dirname, suffix, max, interval, stop, nil)
func PurgeFile(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) <-chan error {
return purgeFile(lg, dirname, suffix, max, interval, stop, nil)
}
// purgeFile is the internal implementation for PurgeFile which can post purged files to purgec if non-nil.
func purgeFile(dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}, purgec chan<- string) <-chan error {
func purgeFile(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}, purgec chan<- string) <-chan error {
errC := make(chan error, 1)
go func() {
for {
@ -55,11 +57,19 @@ func purgeFile(dirname string, suffix string, max uint, interval time.Duration,
return
}
if err = l.Close(); err != nil {
plog.Errorf("error unlocking %s when purging file (%v)", l.Name(), err)
if lg != nil {
lg.Warn("failed to unlock/close", zap.String("path", l.Name()), zap.Error(err))
} else {
plog.Errorf("error unlocking %s when purging file (%v)", l.Name(), err)
}
errC <- err
return
}
plog.Infof("purged file %s successfully", f)
if lg != nil {
lg.Info("purged", zap.String("path", f))
} else {
plog.Infof("purged file %s successfully", f)
}
newfnames = newfnames[1:]
}
if purgec != nil {
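
The exported PurgeFile wrapper shares this signature. A minimal sketch of a caller, assuming an illustrative snapshot directory and the test-only zap.NewExample() logger:

	stop := make(chan struct{})
	// keep the 5 most recent "*.snap" files, scanning every 30 seconds;
	// purge failures are reported on the returned channel
	errc := fileutil.PurgeFile(zap.NewExample(), "/var/lib/etcd/snap", "snap", 5, 30*time.Second, stop)
	go func() {
		if err := <-errc; err != nil {
			// a file could not be removed or unlocked
		}
	}()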

View File

@ -22,6 +22,8 @@ import (
"reflect"
"testing"
"time"
"go.uber.org/zap"
)
func TestPurgeFile(t *testing.T) {
@ -43,7 +45,7 @@ func TestPurgeFile(t *testing.T) {
stop, purgec := make(chan struct{}), make(chan string, 10)
// keep 3 most recent files
errch := purgeFile(dir, "test", 3, time.Millisecond, stop, purgec)
errch := purgeFile(zap.NewExample(), dir, "test", 3, time.Millisecond, stop, purgec)
select {
case f := <-purgec:
t.Errorf("unexpected purge on %q", f)
@ -114,7 +116,7 @@ func TestPurgeFileHoldingLockFile(t *testing.T) {
}
stop, purgec := make(chan struct{}), make(chan string, 10)
errch := purgeFile(dir, "test", 3, time.Millisecond, stop, purgec)
errch := purgeFile(zap.NewExample(), dir, "test", 3, time.Millisecond, stop, purgec)
for i := 0; i < 5; i++ {
select {

View File

@ -17,6 +17,7 @@ package logutil
import "google.golang.org/grpc/grpclog"
// Logger defines logging interface.
// TODO: deprecate in v3.5.
type Logger interface {
grpclog.LoggerV2

166
pkg/logutil/zap.go Normal file
View File

@ -0,0 +1,166 @@
// Copyright 2018 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package logutil
import (
"github.com/coreos/etcd/raft"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"google.golang.org/grpc/grpclog"
)
// NewGRPCLoggerV2 converts "*zap.Logger" to "grpclog.LoggerV2".
// It discards all INFO-level gRPC logging unless the debug level
// is enabled in the given "*zap.Logger".
func NewGRPCLoggerV2(lcfg zap.Config) (grpclog.LoggerV2, error) {
lg, err := lcfg.Build(zap.AddCallerSkip(1)) // to annotate caller outside of "logutil"
if err != nil {
return nil, err
}
return &zapGRPCLogger{lg: lg, sugar: lg.Sugar()}, nil
}
type zapGRPCLogger struct {
lg *zap.Logger
sugar *zap.SugaredLogger
}
func (zl *zapGRPCLogger) Info(args ...interface{}) {
if !zl.lg.Core().Enabled(zapcore.DebugLevel) {
return
}
zl.sugar.Info(args...)
}
func (zl *zapGRPCLogger) Infoln(args ...interface{}) {
if !zl.lg.Core().Enabled(zapcore.DebugLevel) {
return
}
zl.sugar.Info(args...)
}
func (zl *zapGRPCLogger) Infof(format string, args ...interface{}) {
if !zl.lg.Core().Enabled(zapcore.DebugLevel) {
return
}
zl.sugar.Infof(format, args...)
}
func (zl *zapGRPCLogger) Warning(args ...interface{}) {
zl.sugar.Warn(args...)
}
func (zl *zapGRPCLogger) Warningln(args ...interface{}) {
zl.sugar.Warn(args...)
}
func (zl *zapGRPCLogger) Warningf(format string, args ...interface{}) {
zl.sugar.Warnf(format, args...)
}
func (zl *zapGRPCLogger) Error(args ...interface{}) {
zl.sugar.Error(args...)
}
func (zl *zapGRPCLogger) Errorln(args ...interface{}) {
zl.sugar.Error(args...)
}
func (zl *zapGRPCLogger) Errorf(format string, args ...interface{}) {
zl.sugar.Errorf(format, args...)
}
func (zl *zapGRPCLogger) Fatal(args ...interface{}) {
zl.sugar.Fatal(args...)
}
func (zl *zapGRPCLogger) Fatalln(args ...interface{}) {
zl.sugar.Fatal(args...)
}
func (zl *zapGRPCLogger) Fatalf(format string, args ...interface{}) {
zl.sugar.Fatalf(format, args...)
}
func (zl *zapGRPCLogger) V(l int) bool {
// infoLog == 0
if l <= 0 { // debug level, then we ignore info level in gRPC
return !zl.lg.Core().Enabled(zapcore.DebugLevel)
}
return true
}
// NewRaftLogger converts "*zap.Logger" to "raft.Logger".
func NewRaftLogger(lcfg zap.Config) (raft.Logger, error) {
lg, err := lcfg.Build(zap.AddCallerSkip(1)) // to annotate caller outside of "logutil"
if err != nil {
return nil, err
}
return &zapRaftLogger{lg: lg, sugar: lg.Sugar()}, nil
}
type zapRaftLogger struct {
lg *zap.Logger
sugar *zap.SugaredLogger
}
func (zl *zapRaftLogger) Debug(args ...interface{}) {
zl.sugar.Debug(args...)
}
func (zl *zapRaftLogger) Debugf(format string, args ...interface{}) {
zl.sugar.Debugf(format, args...)
}
func (zl *zapRaftLogger) Error(args ...interface{}) {
zl.sugar.Error(args...)
}
func (zl *zapRaftLogger) Errorf(format string, args ...interface{}) {
zl.sugar.Errorf(format, args...)
}
func (zl *zapRaftLogger) Info(args ...interface{}) {
zl.sugar.Info(args...)
}
func (zl *zapRaftLogger) Infof(format string, args ...interface{}) {
zl.sugar.Infof(format, args...)
}
func (zl *zapRaftLogger) Warning(args ...interface{}) {
zl.sugar.Warn(args...)
}
func (zl *zapRaftLogger) Warningf(format string, args ...interface{}) {
zl.sugar.Warnf(format, args...)
}
func (zl *zapRaftLogger) Fatal(args ...interface{}) {
zl.sugar.Fatal(args...)
}
func (zl *zapRaftLogger) Fatalf(format string, args ...interface{}) {
zl.sugar.Fatalf(format, args...)
}
func (zl *zapRaftLogger) Panic(args ...interface{}) {
zl.sugar.Panic(args...)
}
func (zl *zapRaftLogger) Panicf(format string, args ...interface{}) {
zl.sugar.Panicf(format, args...)
}
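
A sketch of how these converters might be wired in at process start, assuming the standard grpclog.SetLoggerV2 and raft.SetLogger registration hooks:

	lcfg := zap.NewProductionConfig() // info level by default, so gRPC info logs are dropped
	gl, err := logutil.NewGRPCLoggerV2(lcfg)
	if err != nil {
		panic(err)
	}
	grpclog.SetLoggerV2(gl)

	rl, err := logutil.NewRaftLogger(lcfg)
	if err != nil {
		panic(err)
	}
	raft.SetLogger(rl)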

115
pkg/logutil/zap_test.go Normal file
View File

@ -0,0 +1,115 @@
// Copyright 2018 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package logutil
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"testing"
"time"
"go.uber.org/zap"
)
func TestNewGRPCLoggerV2(t *testing.T) {
logPath := filepath.Join(os.TempDir(), fmt.Sprintf("test-log-%d", time.Now().UnixNano()))
defer os.RemoveAll(logPath)
lcfg := zap.Config{
Level: zap.NewAtomicLevelAt(zap.InfoLevel),
Development: false,
Sampling: &zap.SamplingConfig{
Initial: 100,
Thereafter: 100,
},
Encoding: "json",
EncoderConfig: zap.NewProductionEncoderConfig(),
OutputPaths: []string{logPath},
ErrorOutputPaths: []string{logPath},
}
gl, err := NewGRPCLoggerV2(lcfg)
if err != nil {
t.Fatal(err)
}
// debug level is not enabled,
// so info level gRPC-side logging is discarded
gl.Info("etcd-logutil-1")
data, err := ioutil.ReadFile(logPath)
if err != nil {
t.Fatal(err)
}
if bytes.Contains(data, []byte("etcd-logutil-1")) {
t.Fatalf("unexpected line %q", string(data))
}
gl.Warning("etcd-logutil-2")
data, err = ioutil.ReadFile(logPath)
if err != nil {
t.Fatal(err)
}
if !bytes.Contains(data, []byte("etcd-logutil-2")) {
t.Fatalf("can't find data in log %q", string(data))
}
if !bytes.Contains(data, []byte("logutil/zap_test.go:")) {
t.Fatalf("unexpected caller; %q", string(data))
}
}
func TestNewRaftLogger(t *testing.T) {
logPath := filepath.Join(os.TempDir(), fmt.Sprintf("test-log-%d", time.Now().UnixNano()))
defer os.RemoveAll(logPath)
lcfg := zap.Config{
Level: zap.NewAtomicLevelAt(zap.DebugLevel),
Development: false,
Sampling: &zap.SamplingConfig{
Initial: 100,
Thereafter: 100,
},
Encoding: "json",
EncoderConfig: zap.NewProductionEncoderConfig(),
OutputPaths: []string{logPath},
ErrorOutputPaths: []string{logPath},
}
gl, err := NewRaftLogger(lcfg)
if err != nil {
t.Fatal(err)
}
gl.Info("etcd-logutil-1")
data, err := ioutil.ReadFile(logPath)
if err != nil {
t.Fatal(err)
}
if !bytes.Contains(data, []byte("etcd-logutil-1")) {
t.Fatalf("can't find data in log %q", string(data))
}
gl.Warning("etcd-logutil-2")
data, err = ioutil.ReadFile(logPath)
if err != nil {
t.Fatal(err)
}
if !bytes.Contains(data, []byte("etcd-logutil-2")) {
t.Fatalf("can't find data in log %q", string(data))
}
if !bytes.Contains(data, []byte("logutil/zap_test.go:")) {
t.Fatalf("unexpected caller; %q", string(data))
}
}

View File

@ -21,6 +21,8 @@ import (
"os/signal"
"sync"
"syscall"
"go.uber.org/zap"
)
// InterruptHandler is a function that is called on receiving a
@ -43,7 +45,7 @@ func RegisterInterruptHandler(h InterruptHandler) {
}
// HandleInterrupts calls the handler functions on receiving a SIGINT or SIGTERM.
func HandleInterrupts() {
func HandleInterrupts(lg *zap.Logger) {
notifier := make(chan os.Signal, 1)
signal.Notify(notifier, syscall.SIGINT, syscall.SIGTERM)
@ -57,7 +59,11 @@ func HandleInterrupts() {
interruptExitMu.Lock()
plog.Noticef("received %v signal, shutting down...", sig)
if lg != nil {
lg.Info("received signal; shutting down", zap.String("signal", sig.String()))
} else {
plog.Noticef("received %v signal, shutting down...", sig)
}
for _, h := range ihs {
h()
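
A sketch of the updated call site; passing nil keeps the old capnslog output per the fallback above, while any *zap.Logger routes the shutdown notice through zap:

	lg, _ := zap.NewProduction()
	osutil.RegisterInterruptHandler(func() {
		// stop servers, flush state, release locks, ...
	})
	osutil.HandleInterrupts(lg) // installs the SIGINT/SIGTERM handler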

View File

@ -16,7 +16,11 @@
package osutil
import "os"
import (
"os"
"go.uber.org/zap"
)
type InterruptHandler func()
@ -24,7 +28,7 @@ type InterruptHandler func()
func RegisterInterruptHandler(h InterruptHandler) {}
// HandleInterrupts is a no-op on windows
func HandleInterrupts() {}
func HandleInterrupts(*zap.Logger) {}
// Exit calls os.Exit
func Exit(code int) {

View File

@ -21,6 +21,8 @@ import (
"syscall"
"testing"
"time"
"go.uber.org/zap"
)
func init() { setDflSignal = func(syscall.Signal) {} }
@ -69,7 +71,7 @@ func TestHandleInterrupts(t *testing.T) {
c := make(chan os.Signal, 2)
signal.Notify(c, sig)
HandleInterrupts()
HandleInterrupts(zap.NewExample())
syscall.Kill(syscall.Getpid(), sig)
// we should receive the signal once from our own kill and

View File

@ -116,10 +116,12 @@ func SelfCert(lg *zap.Logger, dirpath string, hosts []string) (info TLSInfo, err
serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
if err != nil {
info.Logger.Warn(
"cannot generate random number",
zap.Error(err),
)
if info.Logger != nil {
info.Logger.Warn(
"cannot generate random number",
zap.Error(err),
)
}
return
}
@ -145,19 +147,23 @@ func SelfCert(lg *zap.Logger, dirpath string, hosts []string) (info TLSInfo, err
priv, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
if err != nil {
info.Logger.Warn(
"cannot generate ECDSA key",
zap.Error(err),
)
if info.Logger != nil {
info.Logger.Warn(
"cannot generate ECDSA key",
zap.Error(err),
)
}
return
}
derBytes, err := x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, &priv.PublicKey, priv)
if err != nil {
info.Logger.Warn(
"cannot generate x509 certificate",
zap.Error(err),
)
if info.Logger != nil {
info.Logger.Warn(
"cannot generate x509 certificate",
zap.Error(err),
)
}
return
}
@ -172,7 +178,9 @@ func SelfCert(lg *zap.Logger, dirpath string, hosts []string) (info TLSInfo, err
}
pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
certOut.Close()
info.Logger.Debug("created cert file", zap.String("path", certPath))
if info.Logger != nil {
info.Logger.Info("created cert file", zap.String("path", certPath))
}
b, err := x509.MarshalECPrivateKey(priv)
if err != nil {
@ -180,17 +188,20 @@ func SelfCert(lg *zap.Logger, dirpath string, hosts []string) (info TLSInfo, err
}
keyOut, err := os.OpenFile(keyPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
if err != nil {
info.Logger.Warn(
"cannot key file",
zap.String("path", keyPath),
zap.Error(err),
)
if info.Logger != nil {
info.Logger.Warn(
"cannot key file",
zap.String("path", keyPath),
zap.Error(err),
)
}
return
}
pem.Encode(keyOut, &pem.Block{Type: "EC PRIVATE KEY", Bytes: b})
keyOut.Close()
info.Logger.Debug("created key file", zap.String("path", keyPath))
if info.Logger != nil {
info.Logger.Info("created key file", zap.String("path", keyPath))
}
return SelfCert(lg, dirpath, hosts)
}
@ -250,38 +261,46 @@ func (info TLSInfo) baseConfig() (*tls.Config, error) {
cfg.GetCertificate = func(clientHello *tls.ClientHelloInfo) (cert *tls.Certificate, err error) {
cert, err = tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc)
if os.IsNotExist(err) {
info.Logger.Warn(
"failed to find peer cert files",
zap.String("cert-file", info.CertFile),
zap.String("key-file", info.KeyFile),
zap.Error(err),
)
if info.Logger != nil {
info.Logger.Warn(
"failed to find peer cert files",
zap.String("cert-file", info.CertFile),
zap.String("key-file", info.KeyFile),
zap.Error(err),
)
}
} else if err != nil {
info.Logger.Warn(
"failed to create peer certificate",
zap.String("cert-file", info.CertFile),
zap.String("key-file", info.KeyFile),
zap.Error(err),
)
if info.Logger != nil {
info.Logger.Warn(
"failed to create peer certificate",
zap.String("cert-file", info.CertFile),
zap.String("key-file", info.KeyFile),
zap.Error(err),
)
}
}
return cert, err
}
cfg.GetClientCertificate = func(unused *tls.CertificateRequestInfo) (cert *tls.Certificate, err error) {
cert, err = tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc)
if os.IsNotExist(err) {
info.Logger.Warn(
"failed to find client cert files",
zap.String("cert-file", info.CertFile),
zap.String("key-file", info.KeyFile),
zap.Error(err),
)
if info.Logger != nil {
info.Logger.Warn(
"failed to find client cert files",
zap.String("cert-file", info.CertFile),
zap.String("key-file", info.KeyFile),
zap.Error(err),
)
}
} else if err != nil {
info.Logger.Warn(
"failed to create client certificate",
zap.String("cert-file", info.CertFile),
zap.String("key-file", info.KeyFile),
zap.Error(err),
)
if info.Logger != nil {
info.Logger.Warn(
"failed to create client certificate",
zap.String("cert-file", info.CertFile),
zap.String("key-file", info.KeyFile),
zap.Error(err),
)
}
}
return cert, err
}
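
With the nil checks in place, SelfCert is safe to call with or without a logger. A minimal sketch (the directory and host list are illustrative; zap.NewExample() is test-only):

	tlsInfo, err := transport.SelfCert(zap.NewExample(), "/tmp/etcd-certs", []string{"127.0.0.1:2379"})
	if err != nil {
		panic(err)
	}
	cfg, err := tlsInfo.ServerConfig() // *tls.Config from the generated cert/key pair
	// hand cfg to an http.Server or TLS listener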

View File

@ -23,11 +23,10 @@ import (
"time"
"github.com/coreos/pkg/capnslog"
"go.uber.org/zap"
)
var (
plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "proxy/tcpproxy")
)
var plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "proxy/tcpproxy")
type remote struct {
mu sync.Mutex
@ -61,6 +60,7 @@ func (r *remote) isActive() bool {
}
type TCPProxy struct {
Logger *zap.Logger
Listener net.Listener
Endpoints []*net.SRV
MonitorInterval time.Duration
@ -86,7 +86,11 @@ func (tp *TCPProxy) Run() error {
for _, ep := range tp.Endpoints {
eps = append(eps, fmt.Sprintf("%s:%d", ep.Target, ep.Port))
}
plog.Printf("ready to proxy client requests to %+v", eps)
if tp.Logger != nil {
tp.Logger.Info("ready to proxy client requests", zap.Strings("endpoints", eps))
} else {
plog.Printf("ready to proxy client requests to %+v", eps)
}
go tp.runMonitor()
for {
@ -175,7 +179,11 @@ func (tp *TCPProxy) serve(in net.Conn) {
break
}
remote.inactivate()
plog.Warningf("deactivated endpoint [%s] due to %v for %v", remote.addr, err, tp.MonitorInterval)
if tp.Logger != nil {
tp.Logger.Warn("deactivated endpoint", zap.String("address", remote.addr), zap.Duration("interval", tp.MonitorInterval), zap.Error(err))
} else {
plog.Warningf("deactivated endpoint [%s] due to %v for %v", remote.addr, err, tp.MonitorInterval)
}
}
if out == nil {
@ -205,9 +213,17 @@ func (tp *TCPProxy) runMonitor() {
}
go func(r *remote) {
if err := r.tryReactivate(); err != nil {
plog.Warningf("failed to activate endpoint [%s] due to %v (stay inactive for another %v)", r.addr, err, tp.MonitorInterval)
if tp.Logger != nil {
tp.Logger.Warn("failed to activate endpoint (stay inactive for another interval)", zap.String("address", r.addr), zap.Duration("interval", tp.MonitorInterval), zap.Error(err))
} else {
plog.Warningf("failed to activate endpoint [%s] due to %v (stay inactive for another %v)", r.addr, err, tp.MonitorInterval)
}
} else {
plog.Printf("activated %s", r.addr)
if tp.Logger != nil {
tp.Logger.Info("activated", zap.String("address", r.addr))
} else {
plog.Printf("activated %s", r.addr)
}
}
}(rem)
}
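
The Logger field is optional; when nil, the proxy keeps the capnslog output shown in the else branches above. A minimal sketch with illustrative addresses:

	ln, err := net.Listen("tcp", "127.0.0.1:23790")
	if err != nil {
		panic(err)
	}
	tp := &tcpproxy.TCPProxy{
		Logger:          zap.NewExample(), // optional
		Listener:        ln,
		Endpoints:       []*net.SRV{{Target: "127.0.0.1", Port: 2379}},
		MonitorInterval: time.Minute,
	}
	defer tp.Stop()
	if err = tp.Run(); err != nil { // blocks, accepting client connections
		panic(err)
	}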

View File

@ -28,6 +28,8 @@ import (
"github.com/coreos/etcd/raft/raftpb"
"github.com/coreos/etcd/raftsnap"
"github.com/coreos/etcd/version"
"go.uber.org/zap"
)
const (
@ -59,9 +61,11 @@ type writerToResponse interface {
}
type pipelineHandler struct {
tr Transporter
r Raft
cid types.ID
lg *zap.Logger
localID types.ID
tr Transporter
r Raft
cid types.ID
}
// newPipelineHandler returns a handler for handling raft messages
@ -69,11 +73,13 @@ type pipelineHandler struct {
//
// The handler reads out the raft message from request body,
// and forwards it to the given raft state machine for processing.
func newPipelineHandler(tr Transporter, r Raft, cid types.ID) http.Handler {
func newPipelineHandler(t *Transport, r Raft, cid types.ID) http.Handler {
return &pipelineHandler{
tr: tr,
r: r,
cid: cid,
lg: t.Logger,
localID: t.ID,
tr: t,
r: r,
cid: cid,
}
}
@ -86,7 +92,7 @@ func (h *pipelineHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
w.Header().Set("X-Etcd-Cluster-ID", h.cid.String())
if err := checkClusterCompatibilityFromHeader(r.Header, h.cid); err != nil {
if err := checkClusterCompatibilityFromHeader(h.lg, h.localID, r.Header, h.cid); err != nil {
http.Error(w, err.Error(), http.StatusPreconditionFailed)
return
}
@ -98,7 +104,15 @@ func (h *pipelineHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
limitedr := pioutil.NewLimitedBufferReader(r.Body, connReadLimitByte)
b, err := ioutil.ReadAll(limitedr)
if err != nil {
plog.Errorf("failed to read raft message (%v)", err)
if h.lg != nil {
h.lg.Warn(
"failed to read Raft message",
zap.String("local-member-id", h.localID.String()),
zap.Error(err),
)
} else {
plog.Errorf("failed to read raft message (%v)", err)
}
http.Error(w, "error reading raft message", http.StatusBadRequest)
recvFailures.WithLabelValues(r.RemoteAddr).Inc()
return
@ -106,7 +120,15 @@ func (h *pipelineHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
var m raftpb.Message
if err := m.Unmarshal(b); err != nil {
plog.Errorf("failed to unmarshal raft message (%v)", err)
if h.lg != nil {
h.lg.Warn(
"failed to unmarshal Raft message",
zap.String("local-member-id", h.localID.String()),
zap.Error(err),
)
} else {
plog.Errorf("failed to unmarshal raft message (%v)", err)
}
http.Error(w, "error unmarshaling raft message", http.StatusBadRequest)
recvFailures.WithLabelValues(r.RemoteAddr).Inc()
return
@ -119,7 +141,15 @@ func (h *pipelineHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
case writerToResponse:
v.WriteTo(w)
default:
plog.Warningf("failed to process raft message (%v)", err)
if h.lg != nil {
h.lg.Warn(
"failed to process Raft message",
zap.String("local-member-id", h.localID.String()),
zap.Error(err),
)
} else {
plog.Warningf("failed to process raft message (%v)", err)
}
http.Error(w, "error processing raft message", http.StatusInternalServerError)
w.(http.Flusher).Flush()
// disconnect the http stream
@ -134,17 +164,22 @@ func (h *pipelineHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
type snapshotHandler struct {
lg *zap.Logger
tr Transporter
r Raft
snapshotter *raftsnap.Snapshotter
cid types.ID
localID types.ID
cid types.ID
}
func newSnapshotHandler(tr Transporter, r Raft, snapshotter *raftsnap.Snapshotter, cid types.ID) http.Handler {
func newSnapshotHandler(t *Transport, r Raft, snapshotter *raftsnap.Snapshotter, cid types.ID) http.Handler {
return &snapshotHandler{
tr: tr,
lg: t.Logger,
tr: t,
r: r,
snapshotter: snapshotter,
localID: t.ID,
cid: cid,
}
}
@ -167,7 +202,7 @@ func (h *snapshotHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
w.Header().Set("X-Etcd-Cluster-ID", h.cid.String())
if err := checkClusterCompatibilityFromHeader(r.Header, h.cid); err != nil {
if err := checkClusterCompatibilityFromHeader(h.lg, h.localID, r.Header, h.cid); err != nil {
http.Error(w, err.Error(), http.StatusPreconditionFailed)
return
}
@ -179,7 +214,16 @@ func (h *snapshotHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
m, err := dec.decodeLimit(uint64(1 << 63))
if err != nil {
msg := fmt.Sprintf("failed to decode raft message (%v)", err)
plog.Errorf(msg)
if h.lg != nil {
h.lg.Warn(
"failed to decode Raft message",
zap.String("local-member-id", h.localID.String()),
zap.String("remote-snapshot-sender-id", types.ID(m.From).String()),
zap.Error(err),
)
} else {
plog.Error(msg)
}
http.Error(w, msg, http.StatusBadRequest)
recvFailures.WithLabelValues(r.RemoteAddr).Inc()
return
@ -188,22 +232,61 @@ func (h *snapshotHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
receivedBytes.WithLabelValues(types.ID(m.From).String()).Add(float64(m.Size()))
if m.Type != raftpb.MsgSnap {
plog.Errorf("unexpected raft message type %s on snapshot path", m.Type)
if h.lg != nil {
h.lg.Warn(
"unexpected Raft message type",
zap.String("local-member-id", h.localID.String()),
zap.String("remote-snapshot-sender-id", types.ID(m.From).String()),
zap.String("message-type", m.Type.String()),
)
} else {
plog.Errorf("unexpected raft message type %s on snapshot path", m.Type)
}
http.Error(w, "wrong raft message type", http.StatusBadRequest)
return
}
plog.Infof("receiving database snapshot [index:%d, from %s] ...", m.Snapshot.Metadata.Index, types.ID(m.From))
if h.lg != nil {
h.lg.Info(
"receiving database snapshot",
zap.String("local-member-id", h.localID.String()),
zap.String("remote-snapshot-sender-id", types.ID(m.From).String()),
zap.Uint64("snapshot-index", m.Snapshot.Metadata.Index),
)
} else {
plog.Infof("receiving database snapshot [index:%d, from %s] ...", m.Snapshot.Metadata.Index, types.ID(m.From))
}
// save incoming database snapshot.
n, err := h.snapshotter.SaveDBFrom(r.Body, m.Snapshot.Metadata.Index)
if err != nil {
msg := fmt.Sprintf("failed to save KV snapshot (%v)", err)
plog.Error(msg)
if h.lg != nil {
h.lg.Warn(
"failed to save KV snapshot",
zap.String("local-member-id", h.localID.String()),
zap.String("remote-snapshot-sender-id", types.ID(m.From).String()),
zap.Error(err),
)
} else {
plog.Error(msg)
}
http.Error(w, msg, http.StatusInternalServerError)
return
}
receivedBytes.WithLabelValues(types.ID(m.From).String()).Add(float64(n))
plog.Infof("received and saved database snapshot [index: %d, from: %s] successfully", m.Snapshot.Metadata.Index, types.ID(m.From))
if h.lg != nil {
h.lg.Info(
"received and saved database snapshot",
zap.String("local-member-id", h.localID.String()),
zap.String("remote-snapshot-sender-id", types.ID(m.From).String()),
zap.Uint64("snapshot-index", m.Snapshot.Metadata.Index),
)
} else {
plog.Infof("received and saved database snapshot [index: %d, from: %s] successfully", m.Snapshot.Metadata.Index, types.ID(m.From))
}
if err := h.r.Process(context.TODO(), m); err != nil {
switch v := err.(type) {
@ -213,17 +296,28 @@ func (h *snapshotHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
v.WriteTo(w)
default:
msg := fmt.Sprintf("failed to process raft message (%v)", err)
plog.Warningf(msg)
if h.lg != nil {
h.lg.Warn(
"failed to process Raft message",
zap.String("local-member-id", h.localID.String()),
zap.String("remote-snapshot-sender-id", types.ID(m.From).String()),
zap.Error(err),
)
} else {
plog.Error(msg)
}
http.Error(w, msg, http.StatusInternalServerError)
}
return
}
// Write StatusNoContent header after the message has been processed by
// raft, so that the client can report the MsgSnap status.
w.WriteHeader(http.StatusNoContent)
}
type streamHandler struct {
lg *zap.Logger
tr *Transport
peerGetter peerGetter
r Raft
@ -231,9 +325,10 @@ type streamHandler struct {
cid types.ID
}
func newStreamHandler(tr *Transport, pg peerGetter, r Raft, id, cid types.ID) http.Handler {
func newStreamHandler(t *Transport, pg peerGetter, r Raft, id, cid types.ID) http.Handler {
return &streamHandler{
tr: tr,
lg: t.Logger,
tr: t,
peerGetter: pg,
r: r,
id: id,
@ -251,7 +346,7 @@ func (h *streamHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
w.Header().Set("X-Server-Version", version.Version)
w.Header().Set("X-Etcd-Cluster-ID", h.cid.String())
if err := checkClusterCompatibilityFromHeader(r.Header, h.cid); err != nil {
if err := checkClusterCompatibilityFromHeader(h.lg, h.tr.ID, r.Header, h.cid); err != nil {
http.Error(w, err.Error(), http.StatusPreconditionFailed)
return
}
@ -263,7 +358,16 @@ func (h *streamHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
case streamTypeMessage.endpoint():
t = streamTypeMessage
default:
plog.Debugf("ignored unexpected streaming request path %s", r.URL.Path)
if h.lg != nil {
h.lg.Debug(
"ignored unexpected streaming request path",
zap.String("local-member-id", h.tr.ID.String()),
zap.String("remote-peer-id-stream-handler", h.id.String()),
zap.String("path", r.URL.Path),
)
} else {
plog.Debugf("ignored unexpected streaming request path %s", r.URL.Path)
}
http.Error(w, "invalid path", http.StatusNotFound)
return
}
@ -271,12 +375,31 @@ func (h *streamHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
fromStr := path.Base(r.URL.Path)
from, err := types.IDFromString(fromStr)
if err != nil {
plog.Errorf("failed to parse from %s into ID (%v)", fromStr, err)
if h.lg != nil {
h.lg.Warn(
"failed to parse path into ID",
zap.String("local-member-id", h.tr.ID.String()),
zap.String("remote-peer-id-stream-handler", h.id.String()),
zap.String("path", fromStr),
zap.Error(err),
)
} else {
plog.Errorf("failed to parse from %s into ID (%v)", fromStr, err)
}
http.Error(w, "invalid from", http.StatusNotFound)
return
}
if h.r.IsIDRemoved(uint64(from)) {
plog.Warningf("rejected the stream from peer %s since it was removed", from)
if h.lg != nil {
h.lg.Warn(
"rejected stream from remote peer because it was removed",
zap.String("local-member-id", h.tr.ID.String()),
zap.String("remote-peer-id-stream-handler", h.id.String()),
zap.String("remote-peer-id-from", from.String()),
)
} else {
plog.Warningf("rejected the stream from peer %s since it was removed", from)
}
http.Error(w, "removed member", http.StatusGone)
return
}
@ -290,14 +413,35 @@ func (h *streamHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if urls := r.Header.Get("X-PeerURLs"); urls != "" {
h.tr.AddRemote(from, strings.Split(urls, ","))
}
plog.Errorf("failed to find member %s in cluster %s", from, h.cid)
if h.lg != nil {
h.lg.Warn(
"failed to find remote peer in cluster",
zap.String("local-member-id", h.tr.ID.String()),
zap.String("remote-peer-id-stream-handler", h.id.String()),
zap.String("remote-peer-id-from", from.String()),
zap.String("cluster-id", h.cid.String()),
)
} else {
plog.Errorf("failed to find member %s in cluster %s", from, h.cid)
}
http.Error(w, "error sender not found", http.StatusNotFound)
return
}
wto := h.id.String()
if gto := r.Header.Get("X-Raft-To"); gto != wto {
plog.Errorf("streaming request ignored (ID mismatch got %s want %s)", gto, wto)
if h.lg != nil {
h.lg.Warn(
"ignored streaming request; ID mismatch",
zap.String("local-member-id", h.tr.ID.String()),
zap.String("remote-peer-id-stream-handler", h.id.String()),
zap.String("remote-peer-id-header", gto),
zap.String("remote-peer-id-from", from.String()),
zap.String("cluster-id", h.cid.String()),
)
} else {
plog.Errorf("streaming request ignored (ID mismatch got %s want %s)", gto, wto)
}
http.Error(w, "to field mismatch", http.StatusPreconditionFailed)
return
}
@ -321,13 +465,66 @@ func (h *streamHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// It checks whether the version of local member is compatible with
// the versions in the header, and whether the cluster ID of local member
// matches the one in the header.
func checkClusterCompatibilityFromHeader(header http.Header, cid types.ID) error {
if err := checkVersionCompability(header.Get("X-Server-From"), serverVersion(header), minClusterVersion(header)); err != nil {
plog.Errorf("request version incompatibility (%v)", err)
func checkClusterCompatibilityFromHeader(lg *zap.Logger, localID types.ID, header http.Header, cid types.ID) error {
remoteName := header.Get("X-Server-From")
remoteServer := serverVersion(header)
remoteVs := ""
if remoteServer != nil {
remoteVs = remoteServer.String()
}
remoteMinClusterVer := minClusterVersion(header)
remoteMinClusterVs := ""
if remoteMinClusterVer != nil {
remoteMinClusterVs = remoteMinClusterVer.String()
}
localServer, localMinCluster, err := checkVersionCompatibility(remoteName, remoteServer, remoteMinClusterVer)
localVs := ""
if localServer != nil {
localVs = localServer.String()
}
localMinClusterVs := ""
if localMinCluster != nil {
localMinClusterVs = localMinCluster.String()
}
if err != nil {
if lg != nil {
lg.Warn(
"failed to check version compatibility",
zap.String("local-member-id", localID.String()),
zap.String("local-member-cluster-id", cid.String()),
zap.String("local-member-server-version", localVs),
zap.String("local-member-server-minimum-cluster-version", localMinClusterVs),
zap.String("remote-peer-server-name", remoteName),
zap.String("remote-peer-server-version", remoteVs),
zap.String("remote-peer-server-minimum-cluster-version", remoteMinClusterVs),
zap.Error(err),
)
} else {
plog.Errorf("request version incompatibility (%v)", err)
}
return errIncompatibleVersion
}
if gcid := header.Get("X-Etcd-Cluster-ID"); gcid != cid.String() {
plog.Errorf("request cluster ID mismatch (got %s want %s)", gcid, cid)
if lg != nil {
lg.Warn(
"request cluster ID mismatch",
zap.String("local-member-id", localID.String()),
zap.String("local-member-cluster-id", cid.String()),
zap.String("local-member-server-version", localVs),
zap.String("local-member-server-minimum-cluster-version", localMinClusterVs),
zap.String("remote-peer-server-name", remoteName),
zap.String("remote-peer-server-version", remoteVs),
zap.String("remote-peer-server-minimum-cluster-version", remoteMinClusterVs),
zap.String("remote-peer-cluster-id", gcid),
)
} else {
plog.Errorf("request cluster ID mismatch (got %s want %s)", gcid, cid)
}
return errClusterIDMismatch
}
return nil
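
For reference, these are the headers the check consumes; a sketch of the sending side (X-Min-Cluster-Version is assumed from the minClusterVersion helper, the other names appear in the handlers above):

	req.Header.Set("X-Server-From", localID.String())
	req.Header.Set("X-Server-Version", version.Version)
	req.Header.Set("X-Min-Cluster-Version", version.MinClusterVersion)
	req.Header.Set("X-Etcd-Cluster-ID", cid.String())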

View File

@ -31,6 +31,8 @@ import (
"github.com/coreos/etcd/raft/raftpb"
"github.com/coreos/etcd/raftsnap"
"github.com/coreos/etcd/version"
"go.uber.org/zap"
)
func TestServeRaftPrefix(t *testing.T) {
@ -151,7 +153,7 @@ func TestServeRaftPrefix(t *testing.T) {
req.Header.Set("X-Etcd-Cluster-ID", tt.clusterID)
req.Header.Set("X-Server-Version", version.Version)
rw := httptest.NewRecorder()
h := newPipelineHandler(NewNopTransporter(), tt.p, types.ID(0))
h := newPipelineHandler(&Transport{Logger: zap.NewExample()}, tt.p, types.ID(0))
// goroutine because the handler panics to disconnect on raft error
donec := make(chan struct{})

View File

@ -25,6 +25,7 @@ import (
"github.com/coreos/etcd/raft/raftpb"
"github.com/coreos/etcd/raftsnap"
"go.uber.org/zap"
"golang.org/x/time/rate"
)
@ -93,9 +94,13 @@ type Peer interface {
// A pipeline is a series of http clients that send http requests to the remote.
// It is only used when the stream has not been established.
type peer struct {
lg *zap.Logger
localID types.ID
// id of the remote raft peer node
id types.ID
r Raft
r Raft
status *peerStatus
@ -118,17 +123,27 @@ type peer struct {
stopc chan struct{}
}
func startPeer(transport *Transport, urls types.URLs, peerID types.ID, fs *stats.FollowerStats) *peer {
plog.Infof("starting peer %s...", peerID)
defer plog.Infof("started peer %s", peerID)
func startPeer(t *Transport, urls types.URLs, peerID types.ID, fs *stats.FollowerStats) *peer {
if t.Logger != nil {
t.Logger.Info("starting remote peer", zap.String("remote-peer-id", peerID.String()))
} else {
plog.Infof("starting peer %s...", peerID)
}
defer func() {
if t.Logger != nil {
t.Logger.Info("started remote peer", zap.String("remote-peer-id", peerID.String()))
} else {
plog.Infof("started peer %s", peerID)
}
}()
status := newPeerStatus(peerID)
status := newPeerStatus(t.Logger, peerID)
picker := newURLPicker(urls)
errorc := transport.ErrorC
r := transport.Raft
errorc := t.ErrorC
r := t.Raft
pipeline := &pipeline{
peerID: peerID,
tr: transport,
tr: t,
picker: picker,
status: status,
followerStats: fs,
@ -138,14 +153,16 @@ func startPeer(transport *Transport, urls types.URLs, peerID types.ID, fs *stats
pipeline.start()
p := &peer{
lg: t.Logger,
localID: t.ID,
id: peerID,
r: r,
status: status,
picker: picker,
msgAppV2Writer: startStreamWriter(peerID, status, fs, r),
writer: startStreamWriter(peerID, status, fs, r),
msgAppV2Writer: startStreamWriter(t.Logger, t.ID, peerID, status, fs, r),
writer: startStreamWriter(t.Logger, t.ID, peerID, status, fs, r),
pipeline: pipeline,
snapSender: newSnapshotSender(transport, picker, peerID, status),
snapSender: newSnapshotSender(t, picker, peerID, status),
recvc: make(chan raftpb.Message, recvBufSize),
propc: make(chan raftpb.Message, maxPendingProposals),
stopc: make(chan struct{}),
@ -158,7 +175,11 @@ func startPeer(transport *Transport, urls types.URLs, peerID types.ID, fs *stats
select {
case mm := <-p.recvc:
if err := r.Process(ctx, mm); err != nil {
plog.Warningf("failed to process raft message (%v)", err)
if t.Logger != nil {
t.Logger.Warn("failed to process Raft message", zap.Error(err))
} else {
plog.Warningf("failed to process raft message (%v)", err)
}
}
case <-p.stopc:
return
@ -183,24 +204,26 @@ func startPeer(transport *Transport, urls types.URLs, peerID types.ID, fs *stats
}()
p.msgAppV2Reader = &streamReader{
lg: t.Logger,
peerID: peerID,
typ: streamTypeMsgAppV2,
tr: transport,
tr: t,
picker: picker,
status: status,
recvc: p.recvc,
propc: p.propc,
rl: rate.NewLimiter(transport.DialRetryFrequency, 1),
rl: rate.NewLimiter(t.DialRetryFrequency, 1),
}
p.msgAppReader = &streamReader{
lg: t.Logger,
peerID: peerID,
typ: streamTypeMessage,
tr: transport,
tr: t,
picker: picker,
status: status,
recvc: p.recvc,
propc: p.propc,
rl: rate.NewLimiter(transport.DialRetryFrequency, 1),
rl: rate.NewLimiter(t.DialRetryFrequency, 1),
}
p.msgAppV2Reader.start()
@ -227,9 +250,32 @@ func (p *peer) send(m raftpb.Message) {
p.r.ReportSnapshot(m.To, raft.SnapshotFailure)
}
if p.status.isActive() {
plog.MergeWarningf("dropped internal raft message to %s since %s's sending buffer is full (bad/overloaded network)", p.id, name)
if p.lg != nil {
p.lg.Warn(
"dropped internal Raft message since sending buffer is full (overloaded network)",
zap.String("message-type", m.Type.String()),
zap.String("local-member-id", p.localID.String()),
zap.String("from", types.ID(m.From).String()),
zap.String("remote-peer-id", types.ID(p.id).String()),
zap.Bool("remote-peer-active", p.status.isActive()),
)
} else {
plog.MergeWarningf("dropped internal raft message to %s since %s's sending buffer is full (bad/overloaded network)", p.id, name)
}
} else {
if p.lg != nil {
p.lg.Warn(
"dropped internal Raft message since sending buffer is full (overloaded network)",
zap.String("message-type", m.Type.String()),
zap.String("local-member-id", p.localID.String()),
zap.String("from", types.ID(m.From).String()),
zap.String("remote-peer-id", types.ID(p.id).String()),
zap.Bool("remote-peer-active", p.status.isActive()),
)
} else {
plog.Debugf("dropped %s to %s since %s's sending buffer is full", m.Type, p.id, name)
}
}
plog.Debugf("dropped %s to %s since %s's sending buffer is full", m.Type, p.id, name)
sentFailures.WithLabelValues(types.ID(m.To).String()).Inc()
}
}
@ -250,7 +296,11 @@ func (p *peer) attachOutgoingConn(conn *outgoingConn) {
case streamTypeMessage:
ok = p.writer.attach(conn)
default:
plog.Panicf("unhandled stream type %s", conn.t)
if p.lg != nil {
p.lg.Panic("unknown stream type", zap.String("type", conn.t.String()))
} else {
plog.Panicf("unhandled stream type %s", conn.t)
}
}
if !ok {
conn.Close()
@ -279,8 +329,19 @@ func (p *peer) Resume() {
}
func (p *peer) stop() {
plog.Infof("stopping peer %s...", p.id)
defer plog.Infof("stopped peer %s", p.id)
if p.lg != nil {
p.lg.Info("stopping remote peer", zap.String("remote-peer-id", p.id.String()))
} else {
plog.Infof("stopping peer %s...", p.id)
}
defer func() {
if p.lg != nil {
p.lg.Info("stopped remote peer", zap.String("remote-peer-id", p.id.String()))
} else {
plog.Infof("stopped peer %s", p.id)
}
}()
close(p.stopc)
p.cancel()

View File

@ -15,11 +15,14 @@
package rafthttp
import (
"errors"
"fmt"
"sync"
"time"
"github.com/coreos/etcd/pkg/types"
"go.uber.org/zap"
)
type failureType struct {
@ -28,23 +31,26 @@ type failureType struct {
}
type peerStatus struct {
lg *zap.Logger
id types.ID
mu sync.Mutex // protect variables below
active bool
since time.Time
}
func newPeerStatus(id types.ID) *peerStatus {
return &peerStatus{
id: id,
}
func newPeerStatus(lg *zap.Logger, id types.ID) *peerStatus {
return &peerStatus{lg: lg, id: id}
}
func (s *peerStatus) activate() {
s.mu.Lock()
defer s.mu.Unlock()
if !s.active {
plog.Infof("peer %s became active", s.id)
if s.lg != nil {
s.lg.Info("peer became active", zap.String("peer-id", s.id.String()))
} else {
plog.Infof("peer %s became active", s.id)
}
s.active = true
s.since = time.Now()
}
@ -55,13 +61,19 @@ func (s *peerStatus) deactivate(failure failureType, reason string) {
defer s.mu.Unlock()
msg := fmt.Sprintf("failed to %s %s on %s (%s)", failure.action, s.id, failure.source, reason)
if s.active {
plog.Errorf(msg)
plog.Infof("peer %s became inactive", s.id)
if s.lg != nil {
s.lg.Warn("peer became inactive", zap.String("peer-id", s.id.String()), zap.Error(errors.New(msg)))
} else {
plog.Errorf(msg)
plog.Infof("peer %s became inactive", s.id)
}
s.active = false
s.since = time.Time{}
return
}
plog.Debugf(msg)
if s.lg != nil {
s.lg.Warn("peer deactivated again", zap.String("peer-id", s.id.String()), zap.Error(errors.New(msg)))
}
}
func (s *peerStatus) isActive() bool {
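
A small sketch of the logger-aware status object (the failureType values mirror those used by the stream and pipeline code):

	st := newPeerStatus(zap.NewExample(), types.ID(1))
	st.activate() // logs "peer became active"
	st.deactivate(failureType{source: "stream msgapp v2", action: "write"}, "broken pipe")
	if !st.isActive() {
		// the peer is now marked inactive and its "since" timestamp is zeroed
	}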

View File

@ -27,6 +27,8 @@ import (
"github.com/coreos/etcd/pkg/types"
"github.com/coreos/etcd/raft"
"github.com/coreos/etcd/raft/raftpb"
"go.uber.org/zap"
)
const (
@ -64,13 +66,31 @@ func (p *pipeline) start() {
for i := 0; i < connPerPipeline; i++ {
go p.handle()
}
plog.Infof("started HTTP pipelining with peer %s", p.peerID)
if p.tr != nil && p.tr.Logger != nil {
p.tr.Logger.Info(
"started HTTP pipelining with remote peer",
zap.String("local-member-id", p.tr.ID.String()),
zap.String("remote-peer-id", p.peerID.String()),
)
} else {
plog.Infof("started HTTP pipelining with peer %s", p.peerID)
}
}
func (p *pipeline) stop() {
close(p.stopc)
p.wg.Wait()
plog.Infof("stopped HTTP pipelining with peer %s", p.peerID)
if p.tr != nil && p.tr.Logger != nil {
p.tr.Logger.Info(
"stopped HTTP pipelining with remote peer",
zap.String("local-member-id", p.tr.ID.String()),
zap.String("remote-peer-id", p.peerID.String()),
)
} else {
plog.Infof("stopped HTTP pipelining with peer %s", p.peerID)
}
}
func (p *pipeline) handle() {

View File

@ -24,6 +24,8 @@ import (
"testing"
"time"
"go.uber.org/zap"
"github.com/coreos/etcd/etcdserver/stats"
"github.com/coreos/etcd/pkg/testutil"
"github.com/coreos/etcd/pkg/types"
@ -301,7 +303,7 @@ func startTestPipeline(tr *Transport, picker *urlPicker) *pipeline {
peerID: types.ID(1),
tr: tr,
picker: picker,
status: newPeerStatus(types.ID(1)),
status: newPeerStatus(zap.NewExample(), types.ID(1)),
raft: &fakeRaft{},
followerStats: &stats.FollowerStats{},
errorc: make(chan error, 1),

View File

@ -18,6 +18,7 @@ import (
"time"
"github.com/xiang90/probing"
"go.uber.org/zap"
)
var (
@ -28,7 +29,7 @@ var (
statusErrorInterval = 5 * time.Second
)
func addPeerToProber(p probing.Prober, id string, us []string) {
func addPeerToProber(lg *zap.Logger, p probing.Prober, id string, us []string) {
hus := make([]string, len(us))
for i := range us {
hus[i] = us[i] + ProbingPrefix
@ -38,26 +39,49 @@ func addPeerToProber(p probing.Prober, id string, us []string) {
s, err := p.Status(id)
if err != nil {
plog.Errorf("failed to add peer %s into prober", id)
if lg != nil {
lg.Warn("failed to add peer into prober", zap.String("remote-peer-id", id))
} else {
plog.Errorf("failed to add peer %s into prober", id)
}
} else {
go monitorProbingStatus(s, id)
go monitorProbingStatus(lg, s, id)
}
}
func monitorProbingStatus(s probing.Status, id string) {
func monitorProbingStatus(lg *zap.Logger, s probing.Status, id string) {
// set the first interval short to log error early.
interval := statusErrorInterval
for {
select {
case <-time.After(interval):
if !s.Health() {
plog.Warningf("health check for peer %s could not connect: %v", id, s.Err())
if lg != nil {
lg.Warn(
"prober detected unhealthy status",
zap.String("remote-peer-id", id),
zap.Duration("rtt", s.SRTT()),
zap.Error(s.Err()),
)
} else {
plog.Warningf("health check for peer %s could not connect: %v", id, s.Err())
}
interval = statusErrorInterval
} else {
interval = statusMonitoringInterval
}
if s.ClockDiff() > time.Second {
plog.Warningf("the clock difference against peer %s is too high [%v > %v]", id, s.ClockDiff(), time.Second)
if lg != nil {
lg.Warn(
"prober found high clock drift",
zap.String("remote-peer-id", id),
zap.Duration("clock-drift", s.SRTT()),
zap.Duration("rtt", s.ClockDiff()),
zap.Error(s.Err()),
)
} else {
plog.Warningf("the clock difference against peer %s is too high [%v > %v]", id, s.ClockDiff(), time.Second)
}
}
rtts.WithLabelValues(id).Observe(s.SRTT().Seconds())
case <-s.StopNotify():

View File

@ -17,9 +17,13 @@ package rafthttp
import (
"github.com/coreos/etcd/pkg/types"
"github.com/coreos/etcd/raft/raftpb"
"go.uber.org/zap"
)
type remote struct {
lg *zap.Logger
localID types.ID
id types.ID
status *peerStatus
pipeline *pipeline
@ -27,7 +31,7 @@ type remote struct {
func startRemote(tr *Transport, urls types.URLs, id types.ID) *remote {
picker := newURLPicker(urls)
status := newPeerStatus(id)
status := newPeerStatus(tr.Logger, id)
pipeline := &pipeline{
peerID: id,
tr: tr,
@ -39,6 +43,8 @@ func startRemote(tr *Transport, urls types.URLs, id types.ID) *remote {
pipeline.start()
return &remote{
lg: tr.Logger,
localID: tr.ID,
id: id,
status: status,
pipeline: pipeline,
@ -50,9 +56,32 @@ func (g *remote) send(m raftpb.Message) {
case g.pipeline.msgc <- m:
default:
if g.status.isActive() {
plog.MergeWarningf("dropped internal raft message to %s since sending buffer is full (bad/overloaded network)", g.id)
if g.lg != nil {
g.lg.Warn(
"dropped internal Raft message since sending buffer is full (overloaded network)",
zap.String("message-type", m.Type.String()),
zap.String("local-member-id", g.localID.String()),
zap.String("from", types.ID(m.From).String()),
zap.String("remote-peer-id", types.ID(g.id).String()),
zap.Bool("remote-peer-active", g.status.isActive()),
)
} else {
plog.MergeWarningf("dropped internal raft message to %s since sending buffer is full (bad/overloaded network)", g.id)
}
} else {
if g.lg != nil {
g.lg.Warn(
"dropped Raft message since sending buffer is full (overloaded network)",
zap.String("message-type", m.Type.String()),
zap.String("local-member-id", g.localID.String()),
zap.String("from", types.ID(m.From).String()),
zap.String("remote-peer-id", types.ID(g.id).String()),
zap.Bool("remote-peer-active", g.status.isActive()),
)
} else {
plog.Debugf("dropped %s to %s since sending buffer is full", m.Type, g.id)
}
}
plog.Debugf("dropped %s to %s since sending buffer is full", m.Type, g.id)
sentFailures.WithLabelValues(types.ID(m.To).String()).Inc()
}
}

View File

@ -27,6 +27,8 @@ import (
"github.com/coreos/etcd/pkg/types"
"github.com/coreos/etcd/raft"
"github.com/coreos/etcd/raftsnap"
"go.uber.org/zap"
)
var (
@ -66,18 +68,35 @@ func (s *snapshotSender) stop() { close(s.stopc) }
func (s *snapshotSender) send(merged raftsnap.Message) {
m := merged.Message
body := createSnapBody(merged)
body := createSnapBody(s.tr.Logger, merged)
defer body.Close()
u := s.picker.pick()
req := createPostRequest(u, RaftSnapshotPrefix, body, "application/octet-stream", s.tr.URLs, s.from, s.cid)
plog.Infof("start to send database snapshot [index: %d, to %s]...", m.Snapshot.Metadata.Index, types.ID(m.To))
if s.tr.Logger != nil {
s.tr.Logger.Info(
"sending database snapshot",
zap.Uint64("snapshot-index", m.Snapshot.Metadata.Index),
zap.String("remote-peer-id", types.ID(m.To).String()),
)
} else {
plog.Infof("start to send database snapshot [index: %d, to %s]...", m.Snapshot.Metadata.Index, types.ID(m.To))
}
err := s.post(req)
defer merged.CloseWithError(err)
if err != nil {
plog.Warningf("database snapshot [index: %d, to: %s] failed to be sent out (%v)", m.Snapshot.Metadata.Index, types.ID(m.To), err)
if s.tr.Logger != nil {
s.tr.Logger.Warn(
"failed to send database snapshot",
zap.Uint64("snapshot-index", m.Snapshot.Metadata.Index),
zap.String("remote-peer-id", types.ID(m.To).String()),
zap.Error(err),
)
} else {
plog.Warningf("database snapshot [index: %d, to: %s] failed to be sent out (%v)", m.Snapshot.Metadata.Index, types.ID(m.To), err)
}
// errMemberRemoved is a critical error since a removed member should
// always be stopped. So we use reportCriticalError to report it to errorc.
@ -97,7 +116,16 @@ func (s *snapshotSender) send(merged raftsnap.Message) {
}
s.status.activate()
s.r.ReportSnapshot(m.To, raft.SnapshotFinish)
plog.Infof("database snapshot [index: %d, to: %s] sent out successfully", m.Snapshot.Metadata.Index, types.ID(m.To))
if s.tr.Logger != nil {
s.tr.Logger.Info(
"sent database snapshot",
zap.Uint64("snapshot-index", m.Snapshot.Metadata.Index),
zap.String("remote-peer-id", types.ID(m.To).String()),
)
} else {
plog.Infof("database snapshot [index: %d, to: %s] sent out successfully", m.Snapshot.Metadata.Index, types.ID(m.To))
}
sentBytes.WithLabelValues(types.ID(m.To).String()).Add(float64(merged.TotalSize))
}
@ -142,12 +170,16 @@ func (s *snapshotSender) post(req *http.Request) (err error) {
}
}
func createSnapBody(merged raftsnap.Message) io.ReadCloser {
func createSnapBody(lg *zap.Logger, merged raftsnap.Message) io.ReadCloser {
buf := new(bytes.Buffer)
enc := &messageEncoder{w: buf}
// encode raft message
if err := enc.encode(&merged.Message); err != nil {
plog.Panicf("encode message error (%v)", err)
if lg != nil {
lg.Panic("failed to encode message", zap.Error(err))
} else {
plog.Panicf("encode message error (%v)", err)
}
}
return &pioutil.ReaderAndCloser{

View File

@ -28,6 +28,8 @@ import (
"github.com/coreos/etcd/pkg/types"
"github.com/coreos/etcd/raft/raftpb"
"github.com/coreos/etcd/raftsnap"
"go.uber.org/zap"
)
type strReaderCloser struct{ *strings.Reader }
@ -102,12 +104,12 @@ func testSnapshotSend(t *testing.T, sm *raftsnap.Message) (bool, []os.FileInfo)
r := &fakeRaft{}
tr := &Transport{pipelineRt: &http.Transport{}, ClusterID: types.ID(1), Raft: r}
ch := make(chan struct{}, 1)
h := &syncHandler{newSnapshotHandler(tr, r, raftsnap.New(d), types.ID(1)), ch}
h := &syncHandler{newSnapshotHandler(tr, r, raftsnap.New(zap.NewExample(), d), types.ID(1)), ch}
srv := httptest.NewServer(h)
defer srv.Close()
picker := mustNewURLPicker(t, []string{srv.URL})
snapsend := newSnapshotSender(tr, picker, types.ID(1), newPeerStatus(types.ID(1)))
snapsend := newSnapshotSender(tr, picker, types.ID(1), newPeerStatus(zap.NewExample(), types.ID(1)))
defer snapsend.stop()
snapsend.send(*sm)

View File

@ -25,8 +25,6 @@ import (
"sync"
"time"
"golang.org/x/time/rate"
"github.com/coreos/etcd/etcdserver/stats"
"github.com/coreos/etcd/pkg/httputil"
"github.com/coreos/etcd/pkg/transport"
@ -35,6 +33,8 @@ import (
"github.com/coreos/etcd/version"
"github.com/coreos/go-semver/semver"
"go.uber.org/zap"
"golang.org/x/time/rate"
)
const (
@ -105,7 +105,11 @@ type outgoingConn struct {
// streamWriter writes messages to the attached outgoingConn.
type streamWriter struct {
peerID types.ID
lg *zap.Logger
localID types.ID
peerID types.ID
status *peerStatus
fs *stats.FollowerStats
r Raft
@ -122,9 +126,13 @@ type streamWriter struct {
// startStreamWriter creates a streamWriter and starts a long-running goroutine that accepts
// messages and writes them to the attached outgoing connection.
func startStreamWriter(id types.ID, status *peerStatus, fs *stats.FollowerStats, r Raft) *streamWriter {
func startStreamWriter(lg *zap.Logger, local, id types.ID, status *peerStatus, fs *stats.FollowerStats, r Raft) *streamWriter {
w := &streamWriter{
peerID: id,
lg: lg,
localID: local,
peerID: id,
status: status,
fs: fs,
r: r,
@ -150,7 +158,15 @@ func (cw *streamWriter) run() {
defer tickc.Stop()
unflushed := 0
plog.Infof("started streaming with peer %s (writer)", cw.peerID)
if cw.lg != nil {
cw.lg.Info(
"started stream writer with remote peer",
zap.String("local-member-id", cw.localID.String()),
zap.String("remote-peer-id", cw.peerID.String()),
)
} else {
plog.Infof("started streaming with peer %s (writer)", cw.peerID)
}
for {
select {
@ -169,7 +185,16 @@ func (cw *streamWriter) run() {
sentFailures.WithLabelValues(cw.peerID.String()).Inc()
cw.close()
plog.Warningf("lost the TCP streaming connection with peer %s (%s writer)", cw.peerID, t)
if cw.lg != nil {
cw.lg.Warn(
"lost TCP streaming connection with remote peer",
zap.String("stream-writer-type", t.String()),
zap.String("local-member-id", cw.localID.String()),
zap.String("remote-peer-id", cw.peerID.String()),
)
} else {
plog.Warningf("lost the TCP streaming connection with peer %s (%s writer)", cw.peerID, t)
}
heartbeatc, msgc = nil, nil
case m := <-msgc:
@ -191,7 +216,16 @@ func (cw *streamWriter) run() {
cw.status.deactivate(failureType{source: t.String(), action: "write"}, err.Error())
cw.close()
plog.Warningf("lost the TCP streaming connection with peer %s (%s writer)", cw.peerID, t)
if cw.lg != nil {
cw.lg.Warn(
"lost TCP streaming connection with remote peer",
zap.String("stream-writer-type", t.String()),
zap.String("local-member-id", cw.localID.String()),
zap.String("remote-peer-id", cw.peerID.String()),
)
} else {
plog.Warningf("lost the TCP streaming connection with peer %s (%s writer)", cw.peerID, t)
}
heartbeatc, msgc = nil, nil
cw.r.ReportUnreachable(m.To)
sentFailures.WithLabelValues(cw.peerID.String()).Inc()
@ -216,15 +250,50 @@ func (cw *streamWriter) run() {
cw.mu.Unlock()
if closed {
plog.Warningf("closed an existing TCP streaming connection with peer %s (%s writer)", cw.peerID, t)
if cw.lg != nil {
cw.lg.Warn(
"closed TCP streaming connection with remote peer",
zap.String("stream-writer-type", t.String()),
zap.String("local-member-id", cw.localID.String()),
zap.String("remote-peer-id", cw.peerID.String()),
)
} else {
plog.Warningf("closed an existing TCP streaming connection with peer %s (%s writer)", cw.peerID, t)
}
}
if cw.lg != nil {
cw.lg.Info(
"established TCP streaming connection with remote peer",
zap.String("stream-writer-type", t.String()),
zap.String("local-member-id", cw.localID.String()),
zap.String("remote-peer-id", cw.peerID.String()),
)
} else {
plog.Infof("established a TCP streaming connection with peer %s (%s writer)", cw.peerID, t)
}
plog.Infof("established a TCP streaming connection with peer %s (%s writer)", cw.peerID, t)
heartbeatc, msgc = tickc.C, cw.msgc
case <-cw.stopc:
if cw.close() {
plog.Infof("closed the TCP streaming connection with peer %s (%s writer)", cw.peerID, t)
if cw.lg != nil {
cw.lg.Warn(
"closed TCP streaming connection with remote peer",
zap.String("stream-writer-type", t.String()),
zap.String("remote-peer-id", cw.peerID.String()),
)
} else {
plog.Infof("closed the TCP streaming connection with peer %s (%s writer)", cw.peerID, t)
}
}
if cw.lg != nil {
cw.lg.Info(
"stopped TCP streaming connection with remote peer",
zap.String("stream-writer-type", t.String()),
zap.String("remote-peer-id", cw.peerID.String()),
)
} else {
plog.Infof("stopped streaming with peer %s (writer)", cw.peerID)
}
plog.Infof("stopped streaming with peer %s (writer)", cw.peerID)
close(cw.done)
return
}
@ -248,7 +317,15 @@ func (cw *streamWriter) closeUnlocked() bool {
return false
}
if err := cw.closer.Close(); err != nil {
plog.Errorf("peer %s (writer) connection close error: %v", cw.peerID, err)
if cw.lg != nil {
cw.lg.Warn(
"failed to close connection with remote peer",
zap.String("remote-peer-id", cw.peerID.String()),
zap.Error(err),
)
} else {
plog.Errorf("peer %s (writer) connection close error: %v", cw.peerID, err)
}
}
if len(cw.msgc) > 0 {
cw.r.ReportUnreachable(uint64(cw.peerID))
@ -275,6 +352,8 @@ func (cw *streamWriter) stop() {
// streamReader is a long-running goroutine that dials the remote stream
// endpoint and reads messages from the returned response body.
type streamReader struct {
lg *zap.Logger
peerID types.ID
typ streamType
@ -310,7 +389,18 @@ func (cr *streamReader) start() {
func (cr *streamReader) run() {
t := cr.typ
plog.Infof("started streaming with peer %s (%s reader)", cr.peerID, t)
if cr.lg != nil {
cr.lg.Info(
"started stream reader with remote peer",
zap.String("stream-reader-type", t.String()),
zap.String("local-member-id", cr.tr.ID.String()),
zap.String("remote-peer-id", cr.peerID.String()),
)
} else {
plog.Infof("started streaming with peer %s (%s reader)", cr.peerID, t)
}
for {
rc, err := cr.dial(t)
if err != nil {
@ -319,9 +409,28 @@ func (cr *streamReader) run() {
}
} else {
cr.status.activate()
plog.Infof("established a TCP streaming connection with peer %s (%s reader)", cr.peerID, cr.typ)
if cr.lg != nil {
cr.lg.Info(
"established TCP streaming connection with remote peer",
zap.String("stream-reader-type", cr.typ.String()),
zap.String("local-member-id", cr.tr.ID.String()),
zap.String("remote-peer-id", cr.peerID.String()),
)
} else {
plog.Infof("established a TCP streaming connection with peer %s (%s reader)", cr.peerID, cr.typ)
}
err = cr.decodeLoop(rc, t)
plog.Warningf("lost the TCP streaming connection with peer %s (%s reader)", cr.peerID, cr.typ)
if cr.lg != nil {
cr.lg.Warn(
"lost TCP streaming connection with remote peer",
zap.String("stream-reader-type", cr.typ.String()),
zap.String("local-member-id", cr.tr.ID.String()),
zap.String("remote-peer-id", cr.peerID.String()),
zap.Error(err),
)
} else {
plog.Warningf("lost the TCP streaming connection with peer %s (%s reader)", cr.peerID, cr.typ)
}
switch {
// all data is read out
case err == io.EOF:
@ -334,12 +443,31 @@ func (cr *streamReader) run() {
// Wait for a while before new dial attempt
err = cr.rl.Wait(cr.ctx)
if cr.ctx.Err() != nil {
plog.Infof("stopped streaming with peer %s (%s reader)", cr.peerID, t)
if cr.lg != nil {
cr.lg.Info(
"stopped stream reader with remote peer",
zap.String("stream-reader-type", t.String()),
zap.String("local-member-id", cr.tr.ID.String()),
zap.String("remote-peer-id", cr.peerID.String()),
)
} else {
plog.Infof("stopped streaming with peer %s (%s reader)", cr.peerID, t)
}
close(cr.done)
return
}
if err != nil {
plog.Errorf("streaming with peer %s (%s reader) rate limiter error: %v", cr.peerID, t, err)
if cr.lg != nil {
cr.lg.Warn(
"rate limit on stream reader with remote peer",
zap.String("stream-reader-type", t.String()),
zap.String("local-member-id", cr.tr.ID.String()),
zap.String("remote-peer-id", cr.peerID.String()),
zap.Error(err),
)
} else {
plog.Errorf("streaming with peer %s (%s reader) rate limiter error: %v", cr.peerID, t, err)
}
}
}
}
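The redial loop above is throttled by a golang.org/x/time/rate limiter (cr.rl): Wait blocks until the next attempt is permitted or the context is cancelled, which is what Transport.DialRetryFrequency configures per peer. A standalone sketch of that backoff loop, with a stand-in dial function:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	// At most one dial attempt every 300ms, in the spirit of
	// DialRetryFrequency (the documented default is 10 events/sec).
	rl := rate.NewLimiter(rate.Every(300*time.Millisecond), 1)

	dial := func() error { return errors.New("connection refused") } // stand-in

	for {
		if err := dial(); err == nil {
			return // connected
		}
		// Wait for the limiter; a cancelled context ends the retry
		// loop, just as cr.ctx.Err() != nil stops the stream reader.
		if err := rl.Wait(ctx); err != nil {
			fmt.Println("stopped retrying:", err)
			return
		}
	}
}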
@ -353,7 +481,11 @@ func (cr *streamReader) decodeLoop(rc io.ReadCloser, t streamType) error {
case streamTypeMessage:
dec = &messageDecoder{r: rc}
default:
plog.Panicf("unhandled stream type %s", t)
if cr.lg != nil {
cr.lg.Panic("unknown stream type", zap.String("type", t.String()))
} else {
plog.Panicf("unhandled stream type %s", t)
}
}
select {
case <-cr.ctx.Done():
@ -402,9 +534,32 @@ func (cr *streamReader) decodeLoop(rc io.ReadCloser, t streamType) error {
case recvc <- m:
default:
if cr.status.isActive() {
plog.MergeWarningf("dropped internal raft message from %s since receiving buffer is full (overloaded network)", types.ID(m.From))
if cr.lg != nil {
cr.lg.Warn(
"dropped internal Raft message since receiving buffer is full (overloaded network)",
zap.String("message-type", m.Type.String()),
zap.String("local-member-id", cr.tr.ID.String()),
zap.String("from", types.ID(m.From).String()),
zap.String("remote-peer-id", types.ID(m.To).String()),
zap.Bool("remote-peer-active", cr.status.isActive()),
)
} else {
plog.MergeWarningf("dropped internal raft message from %s since receiving buffer is full (overloaded network)", types.ID(m.From))
}
} else {
if cr.lg != nil {
cr.lg.Warn(
"dropped Raft message since receiving buffer is full (overloaded network)",
zap.String("message-type", m.Type.String()),
zap.String("local-member-id", cr.tr.ID.String()),
zap.String("from", types.ID(m.From).String()),
zap.String("remote-peer-id", types.ID(m.To).String()),
zap.Bool("remote-peer-active", cr.status.isActive()),
)
} else {
plog.Debugf("dropped %s from %s since receiving buffer is full", m.Type, types.ID(m.From))
}
}
plog.Debugf("dropped %s from %s since receiving buffer is full", m.Type, types.ID(m.From))
recvFailures.WithLabelValues(types.ID(m.From).String()).Inc()
}
}
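The drop logic above relies on Go's non-blocking send: the select with a default branch delivers the message if the buffer has room and otherwise drops it and bumps a counter, so the decode loop never stalls behind a slow consumer. A minimal illustration:

package main

import "fmt"

func main() {
	recvc := make(chan string, 2) // small receive buffer, like recvc/propc
	dropped := 0

	for i := 0; i < 5; i++ {
		m := fmt.Sprintf("msg-%d", i)
		select {
		case recvc <- m:
			// Delivered without blocking.
		default:
			// Buffer full: drop instead of stalling the decode loop
			// (etcd increments recvFailures here).
			dropped++
		}
	}
	fmt.Println("buffered:", len(recvc), "dropped:", dropped)
}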
@ -467,12 +622,15 @@ func (cr *streamReader) dial(t streamType) (io.ReadCloser, error) {
cr.picker.unreachable(u)
reportCriticalError(errMemberRemoved, cr.errorc)
return nil, errMemberRemoved
case http.StatusOK:
return resp.Body, nil
case http.StatusNotFound:
httputil.GracefulClose(resp)
cr.picker.unreachable(u)
return nil, fmt.Errorf("peer %s failed to find local node %s", cr.peerID, cr.tr.ID)
case http.StatusPreconditionFailed:
b, err := ioutil.ReadAll(resp.Body)
if err != nil {
@ -484,15 +642,38 @@ func (cr *streamReader) dial(t streamType) (io.ReadCloser, error) {
switch strings.TrimSuffix(string(b), "\n") {
case errIncompatibleVersion.Error():
plog.Errorf("request sent was ignored by peer %s (server version incompatible)", cr.peerID)
if cr.lg != nil {
cr.lg.Warn(
"request sent was ignored by remote peer due to server version incompatibility",
zap.String("local-member-id", cr.tr.ID.String()),
zap.String("remote-peer-id", cr.peerID.String()),
zap.Error(errIncompatibleVersion),
)
} else {
plog.Errorf("request sent was ignored by peer %s (server version incompatible)", cr.peerID)
}
return nil, errIncompatibleVersion
case errClusterIDMismatch.Error():
plog.Errorf("request sent was ignored (cluster ID mismatch: peer[%s]=%s, local=%s)",
cr.peerID, resp.Header.Get("X-Etcd-Cluster-ID"), cr.tr.ClusterID)
if cr.lg != nil {
cr.lg.Warn(
"request sent was ignored by remote peer due to cluster ID mismatch",
zap.String("remote-peer-id", cr.peerID.String()),
zap.String("remote-peer-cluster-id", resp.Header.Get("X-Etcd-Cluster-ID")),
zap.String("local-member-id", cr.tr.ID.String()),
zap.String("local-member-cluster-id", cr.tr.ClusterID.String()),
zap.Error(errClusterIDMismatch),
)
} else {
plog.Errorf("request sent was ignored (cluster ID mismatch: peer[%s]=%s, local=%s)",
cr.peerID, resp.Header.Get("X-Etcd-Cluster-ID"), cr.tr.ClusterID)
}
return nil, errClusterIDMismatch
default:
return nil, fmt.Errorf("unhandled error %q when precondition failed", string(b))
}
default:
httputil.GracefulClose(resp)
cr.picker.unreachable(u)
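httputil.GracefulClose, used on each non-OK response above, drains the body before closing it so the keep-alive connection can be reused. A rough approximation of what such a helper does (a sketch, not the etcd implementation verbatim):

package main

import (
	"io"
	"io/ioutil"
	"net/http"
)

// gracefulClose approximates pkg/httputil.GracefulClose: read out any
// remaining body bytes, then close, so the connection can be reused.
func gracefulClose(resp *http.Response) {
	io.Copy(ioutil.Discard, resp.Body)
	resp.Body.Close()
}

func main() {
	resp, err := http.Get("https://example.com")
	if err != nil {
		return
	}
	gracefulClose(resp)
}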
@ -503,7 +684,16 @@ func (cr *streamReader) dial(t streamType) (io.ReadCloser, error) {
func (cr *streamReader) close() {
if cr.closer != nil {
if err := cr.closer.Close(); err != nil {
plog.Errorf("peer %s (reader) connection close error: %v", cr.peerID, err)
if cr.lg != nil {
cr.lg.Warn(
"failed to close remote peer connection",
zap.String("local-member-id", cr.tr.ID.String()),
zap.String("remote-peer-id", cr.peerID.String()),
zap.Error(err),
)
} else {
plog.Errorf("peer %s (reader) connection close error: %v", cr.peerID, err)
}
}
}
cr.closer = nil

View File

@ -33,6 +33,7 @@ import (
"github.com/coreos/etcd/version"
"github.com/coreos/go-semver/semver"
"go.uber.org/zap"
"golang.org/x/time/rate"
)
@ -40,7 +41,7 @@ import (
// to streamWriter. After that, streamWriter can use it to send messages
// continuously, and closes it when stopped.
func TestStreamWriterAttachOutgoingConn(t *testing.T) {
sw := startStreamWriter(zap.NewExample(), types.ID(0), types.ID(1), newPeerStatus(zap.NewExample(), types.ID(1)), &stats.FollowerStats{}, &fakeRaft{})
// the expected initial state of streamWriter is not working
if _, ok := sw.writec(); ok {
t.Errorf("initial working status = %v, want false", ok)
@ -92,7 +93,7 @@ func TestStreamWriterAttachOutgoingConn(t *testing.T) {
// TestStreamWriterAttachBadOutgoingConn tests that streamWriter with bad
// outgoingConn will close the outgoingConn and fall back to non-working status.
func TestStreamWriterAttachBadOutgoingConn(t *testing.T) {
sw := startStreamWriter(zap.NewExample(), types.ID(0), types.ID(1), newPeerStatus(zap.NewExample(), types.ID(1)), &stats.FollowerStats{}, &fakeRaft{})
defer sw.stop()
wfc := newFakeWriteFlushCloser(errors.New("blah"))
sw.attach(&outgoingConn{t: streamTypeMessage, Writer: wfc, Flusher: wfc, Closer: wfc})
@ -196,7 +197,7 @@ func TestStreamReaderStopOnDial(t *testing.T) {
picker: mustNewURLPicker(t, []string{"http://localhost:2380"}),
errorc: make(chan error, 1),
typ: streamTypeMessage,
status: newPeerStatus(zap.NewExample(), types.ID(2)),
rl: rate.NewLimiter(rate.Every(100*time.Millisecond), 1),
}
tr.onResp = func() {
@ -303,7 +304,7 @@ func TestStream(t *testing.T) {
srv := httptest.NewServer(h)
defer srv.Close()
sw := startStreamWriter(zap.NewExample(), types.ID(0), types.ID(1), newPeerStatus(zap.NewExample(), types.ID(1)), &stats.FollowerStats{}, &fakeRaft{})
defer sw.stop()
h.sw = sw
@ -315,7 +316,7 @@ func TestStream(t *testing.T) {
typ: tt.t,
tr: tr,
picker: picker,
status: newPeerStatus(zap.NewExample(), types.ID(2)),
recvc: recvc,
propc: propc,
rl: rate.NewLimiter(rate.Every(100*time.Millisecond), 1),
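The tests satisfy every new *zap.Logger parameter with zap.NewExample(), a cheap deterministic logger that writes one JSON line per entry to stdout; for example:

package main

import "go.uber.org/zap"

func main() {
	lg := zap.NewExample()
	defer lg.Sync()

	lg.Info("added remote peer", zap.String("remote-peer-id", "2"))
	// Prints: {"level":"info","msg":"added remote peer","remote-peer-id":"2"}
}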

View File

@ -30,6 +30,7 @@ import (
"github.com/coreos/pkg/capnslog"
"github.com/xiang90/probing"
"go.uber.org/zap"
"golang.org/x/time/rate"
)
@ -98,6 +99,8 @@ type Transporter interface {
// User needs to call Start before calling other functions, and call
// Stop when the Transport is no longer used.
type Transport struct {
Logger *zap.Logger
DialTimeout time.Duration // maximum duration before timing out dial of the request
// DialRetryFrequency defines the frequency of streamReader dial retry attempts;
// a distinct rate limiter is created per every peer (default value: 10 events/sec)
@ -197,7 +200,15 @@ func (t *Transport) Send(msgs []raftpb.Message) {
continue
}
plog.Debugf("ignored message %s (sent to unknown peer %s)", m.Type, to)
if t.Logger != nil {
t.Logger.Debug(
"ignored message send request; unknown remote peer target",
zap.String("type", m.Type.String()),
zap.String("unknown-target-peer-id", to.String()),
)
} else {
plog.Debugf("ignored message %s (sent to unknown peer %s)", m.Type, to)
}
}
}
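With the exported Logger field, an embedding application can hand the transport its own zap logger; leaving it nil keeps the capnslog messages from the else branches. A wiring sketch under the assumption that only the logging-related fields matter here (a real transport also needs member IDs, URLs, a Raft handler, and a Start call):

package main

import (
	"time"

	"github.com/coreos/etcd/rafthttp"
	"go.uber.org/zap"
)

func main() {
	// Logger is optional: nil falls back to the package-level capnslog.
	tr := &rafthttp.Transport{
		Logger:      zap.NewExample(),
		DialTimeout: time.Second,
	}
	_ = tr // Start, AddPeer, Send, etc. would follow in a real server.
}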
@ -268,7 +279,11 @@ func (t *Transport) AddRemote(id types.ID, us []string) {
}
urls, err := types.NewURLs(us)
if err != nil {
plog.Panicf("newURLs %+v should never fail: %+v", us, err)
if t.Logger != nil {
t.Logger.Panic("failed NewURLs", zap.Strings("urls", us), zap.Error(err))
} else {
plog.Panicf("newURLs %+v should never fail: %+v", us, err)
}
}
t.remotes[id] = startRemote(t, urls, id)
}
@ -285,13 +300,21 @@ func (t *Transport) AddPeer(id types.ID, us []string) {
}
urls, err := types.NewURLs(us)
if err != nil {
plog.Panicf("newURLs %+v should never fail: %+v", us, err)
if t.Logger != nil {
t.Logger.Panic("failed NewURLs", zap.Strings("urls", us), zap.Error(err))
} else {
plog.Panicf("newURLs %+v should never fail: %+v", us, err)
}
}
fs := t.LeaderStats.Follower(id.String())
t.peers[id] = startPeer(t, urls, id, fs)
addPeerToProber(t.Logger, t.prober, id.String(), us)
plog.Infof("added peer %s", id)
if t.Logger != nil {
t.Logger.Info("added remote peer", zap.String("remote-peer-id", id.String()))
} else {
plog.Infof("added peer %s", id)
}
}
func (t *Transport) RemovePeer(id types.ID) {
@ -313,12 +336,21 @@ func (t *Transport) removePeer(id types.ID) {
if peer, ok := t.peers[id]; ok {
peer.stop()
} else {
plog.Panicf("unexpected removal of unknown peer '%d'", id)
if t.Logger != nil {
t.Logger.Panic("unexpected removal of unknown remote peer", zap.String("remote-peer-id", id.String()))
} else {
plog.Panicf("unexpected removal of unknown peer '%d'", id)
}
}
delete(t.peers, id)
delete(t.LeaderStats.Followers, id.String())
t.prober.Remove(id.String())
plog.Infof("removed peer %s", id)
if t.Logger != nil {
t.Logger.Info("removed remote peer", zap.String("remote-peer-id", id.String()))
} else {
plog.Infof("removed peer %s", id)
}
}
func (t *Transport) UpdatePeer(id types.ID, us []string) {
@ -330,13 +362,22 @@ func (t *Transport) UpdatePeer(id types.ID, us []string) {
}
urls, err := types.NewURLs(us)
if err != nil {
plog.Panicf("newURLs %+v should never fail: %+v", us, err)
if t.Logger != nil {
t.Logger.Panic("failed NewURLs", zap.Strings("urls", us), zap.Error(err))
} else {
plog.Panicf("newURLs %+v should never fail: %+v", us, err)
}
}
t.peers[id].update(urls)
t.prober.Remove(id.String())
addPeerToProber(t.Logger, t.prober, id.String(), us)
if t.Logger != nil {
t.Logger.Info("updated remote peer", zap.String("remote-peer-id", id.String()))
} else {
plog.Infof("updated peer %s", id)
}
}
func (t *Transport) ActiveSince(id types.ID) time.Time {
@ -425,7 +466,7 @@ func NewSnapTransporter(snapDir string) (Transporter, <-chan raftsnap.Message) {
}
func (s *snapTransporter) SendSnapshot(m raftsnap.Message) {
ss := raftsnap.New(zap.NewExample(), s.snapDir)
ss.SaveDBFrom(m.ReadCloser, m.Snapshot.Metadata.Index+1)
m.CloseWithError(nil)
s.snapDoneC <- m
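raftsnap.New now takes the logger as its first argument, matching the rest of this change. A usage sketch modeled on the call site above (the directory and reader are stand-ins, and SaveDBFrom's return values are ignored just as they are here):

package main

import (
	"strings"

	"github.com/coreos/etcd/raftsnap"
	"go.uber.org/zap"
)

func main() {
	// The logger is now the first argument to raftsnap.New.
	ss := raftsnap.New(zap.NewExample(), "/tmp/snap-dir")

	// Stream a database snapshot to disk at index 1; an in-memory
	// reader stands in for m.ReadCloser.
	ss.SaveDBFrom(strings.NewReader("fake-db"), 1)
}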
