Merge branch 'main' into support-zap-console-encoding

Arda Güçlü 2021-10-08 11:00:57 +03:00 committed by GitHub
commit d7fa8022e6
93 changed files with 1481 additions and 623 deletions


@@ -8,8 +8,25 @@ The minimum recommended etcd versions to run in **production** are 3.2.28+, 3.3.
<hr>
## v3.3.26 (2021-10-03)
## v3.3.25 (2020 TBD)
See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.25...v3.3.26) and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/) for any breaking changes.
### Package `clientv3`
- Fix [auth token invalid after watch reconnects](https://github.com/etcd-io/etcd/pull/12264). Get AuthToken automatically when clientConn is ready.
### Package `fileutil`
- Fix [constant](https://github.com/etcd-io/etcd/pull/12440) for linux locking.
### Go
- Compile with [*Go 1.12.17*](https://golang.org/doc/devel/release.html#go1.12).
<hr>
## v3.3.25 (2020-08-24)
See [code changes](https://github.com/etcd-io/etcd/compare/v3.3.23...v3.3.25) and [v3.3 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_3/) for any breaking changes.


@@ -7,6 +7,28 @@ The minimum recommended etcd versions to run in **production** are 3.2.28+, 3.3.
<hr>
## v3.4.17 (2021-10-03)
See [code changes](https://github.com/etcd-io/etcd/compare/v3.4.16...v3.4.17) and [v3.4 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_4/) for any breaking changes.
### `etcdctl`
- Fix [etcdctl check datascale command](https://github.com/etcd-io/etcd/pull/11896) to work with https endpoints.
### gRPC gateway
- Add [`MaxCallRecvMsgSize`](https://github.com/etcd-io/etcd/pull/13077) support for http client.
### Dependency
- Replace [`github.com/dgrijalva/jwt-go` with `github.com/golang-jwt/jwt`](https://github.com/etcd-io/etcd/pull/13378).
### Go
- Compile with [*Go 1.12.17*](https://golang.org/doc/devel/release.html#go1.12).
<hr>
## v3.4.16 (2021-05-11)
See [code changes](https://github.com/etcd-io/etcd/compare/v3.4.15...v3.4.16) and [v3.4 upgrade guide](https://etcd.io/docs/latest/upgrades/upgrade_3_4/) for any breaking changes.
@@ -141,6 +163,7 @@ See [code changes](https://github.com/etcd-io/etcd/compare/v3.4.10...v3.4.11) an
### Metrics, Monitoring
- Add [`os_fd_used` and `os_fd_limit` to monitor current OS file descriptors](https://github.com/etcd-io/etcd/pull/12214).
- Add [`etcd_disk_defrag_inflight`](https://github.com/etcd-io/etcd/pull/13397).
### Go


@@ -16,6 +16,13 @@ See [code changes](https://github.com/etcd-io/etcd/compare/v3.5.0...v3.5.1) and
- Fix [self-signed-cert-validity parameter cannot be specified in the config file](https://github.com/etcd-io/etcd/pull/13237).
### etcd client
- [Fix etcd client sends invalid :authority header](https://github.com/etcd-io/etcd/issues/13192)
### package clientv3
- Endpoints now self-identify as `etcd-endpoints://{id}/{authority}`, where the authority is based on the first endpoint passed, for example `etcd-endpoints://0xc0009d8540/localhost:2079`
<hr>
@@ -122,6 +129,7 @@ Note that any `etcd_debugging_*` metrics are experimental and subject to change.
- Add [`etcd_wal_write_bytes_total`](https://github.com/etcd-io/etcd/pull/11738).
- Add [`etcd_debugging_auth_revision`](https://github.com/etcd-io/etcd/commit/f14d2a087f7b0fd6f7980b95b5e0b945109c95f3).
- Add [`os_fd_used` and `os_fd_limit` to monitor current OS file descriptors](https://github.com/etcd-io/etcd/pull/12214).
- Add [`etcd_disk_defrag_inflight`](https://github.com/etcd-io/etcd/pull/13395).
### etcd server


@@ -31,4 +31,10 @@ See [code changes](https://github.com/etcd-io/etcd/compare/v3.5.0...v3.6.0).
### etcd server
- Add [`etcd --log-format`](https://github.com/etcd-io/etcd/pull/13339) flag to support log format.
### Metrics, Monitoring
See [List of metrics](https://etcd.io/docs/latest/metrics/) for all metrics per release.
- Add [`etcd_disk_defrag_inflight`](https://github.com/etcd-io/etcd/pull/13371).


@@ -1,4 +1,5 @@
FROM k8s.gcr.io/build-image/debian-base:buster-v1.4.0
# TODO: move to k8s.gcr.io/build-image/debian-base:bullseye-v1.y.z when patched
FROM debian:bullseye-20210927
ADD etcd /usr/local/bin/
ADD etcdctl /usr/local/bin/


@@ -1,4 +1,5 @@
FROM k8s.gcr.io/build-image/debian-base-arm64:buster-v1.4.0
# TODO: move to k8s.gcr.io/build-image/debian-base-arm64:bullseye-1.y.z when patched
FROM arm64v8/debian:bullseye-20210927
ADD etcd /usr/local/bin/
ADD etcdctl /usr/local/bin/


@@ -1,4 +1,5 @@
FROM k8s.gcr.io/build-image/debian-base-ppc64le:buster-v1.4.0
# TODO: move to k8s.gcr.io/build-image/debian-base-ppc64le:bullseye-1.y.z when patched
FROM ppc64le/debian:bullseye-20210927
ADD etcd /usr/local/bin/
ADD etcdctl /usr/local/bin/


@@ -1,4 +1,5 @@
FROM k8s.gcr.io/build-image/debian-base-s390x:buster-v1.4.0
# TODO: move to k8s.gcr.io/build-image/debian-base-s390x:bullseye-1.y.z when patched
FROM s390x/debian:bullseye-20210927
ADD etcd /usr/local/bin/
ADD etcdctl /usr/local/bin/


@@ -44,16 +44,12 @@ func IsDirWriteable(dir string) error {
// TouchDirAll is similar to os.MkdirAll. It creates directories with 0700 permission if any directory
// does not exist. TouchDirAll also ensures the given directory is writable.
func TouchDirAll(dir string) error {
func TouchDirAll(lg *zap.Logger, dir string) error {
// If path is already a directory, MkdirAll does nothing and returns nil, so,
// first check if dir exists with an expected permission mode.
if Exist(dir) {
err := CheckDirPermission(dir, PrivateDirMode)
if err != nil {
lg, _ := zap.NewProduction()
if lg == nil {
lg = zap.NewExample()
}
lg.Warn("check file permission", zap.Error(err))
}
} else {
@@ -70,8 +66,8 @@ func TouchDirAll(dir string) error {
// CreateDirAll is similar to TouchDirAll but returns error
// if the deepest directory was not empty.
func CreateDirAll(dir string) error {
err := TouchDirAll(dir)
func CreateDirAll(lg *zap.Logger, dir string) error {
err := TouchDirAll(lg, dir)
if err == nil {
var ns []string
ns, err = ReadDir(dir)


@@ -67,7 +67,7 @@ func TestCreateDirAll(t *testing.T) {
defer os.RemoveAll(tmpdir)
tmpdir2 := filepath.Join(tmpdir, "testdir")
if err = CreateDirAll(tmpdir2); err != nil {
if err = CreateDirAll(zaptest.NewLogger(t), tmpdir2); err != nil {
t.Fatal(err)
}
@@ -75,7 +75,7 @@ func TestCreateDirAll(t *testing.T) {
t.Fatal(err)
}
if err = CreateDirAll(tmpdir2); err == nil || !strings.Contains(err.Error(), "to be empty, got") {
if err = CreateDirAll(zaptest.NewLogger(t), tmpdir2); err == nil || !strings.Contains(err.Error(), "to be empty, got") {
t.Fatalf("unexpected error %v", err)
}
}
@@ -186,7 +186,7 @@ func TestDirPermission(t *testing.T) {
tmpdir2 := filepath.Join(tmpdir, "testpermission")
// create a new dir with 0700
if err = CreateDirAll(tmpdir2); err != nil {
if err = CreateDirAll(zaptest.NewLogger(t), tmpdir2); err != nil {
t.Fatal(err)
}
// check dir permission with mode different than created dir


@@ -192,7 +192,7 @@ func SelfCert(lg *zap.Logger, dirpath string, hosts []string, selfSignedCertVali
)
return
}
err = fileutil.TouchDirAll(dirpath)
err = fileutil.TouchDirAll(lg, dirpath)
if err != nil {
if info.Logger != nil {
info.Logger.Warn(


@@ -297,9 +297,7 @@ func (c *Client) dial(creds grpccredentials.TransportCredentials, dopts ...grpc.
dctx, cancel = context.WithTimeout(c.ctx, c.cfg.DialTimeout)
defer cancel() // TODO: Is this right for cases where grpc.WithBlock() is not set on the dial options?
}
initialEndpoints := strings.Join(c.Endpoints(), ";")
target := fmt.Sprintf("%s://%p/#initially=[%s]", resolver.Schema, c, initialEndpoints)
target := fmt.Sprintf("%s://%p/%s", resolver.Schema, c, authority(c.endpoints[0]))
conn, err := grpc.DialContext(dctx, target, opts...)
if err != nil {
return nil, err
@@ -307,6 +305,20 @@ func (c *Client) dial(creds grpccredentials.TransportCredentials, dopts ...grpc.
return conn, nil
}
func authority(endpoint string) string {
spl := strings.SplitN(endpoint, "://", 2)
if len(spl) < 2 {
if strings.HasPrefix(endpoint, "unix:") {
return endpoint[len("unix:"):]
}
if strings.HasPrefix(endpoint, "unixs:") {
return endpoint[len("unixs:"):]
}
return endpoint
}
return spl[1]
}
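
For reference, a minimal sketch of what the authority helper above yields for a few endpoint shapes (a hypothetical in-package test, not part of this diff; the sample endpoints are illustrative):

```go
package clientv3

import "testing"

// Hypothetical check of authority() behavior; not part of this diff.
func TestAuthoritySketch(t *testing.T) {
	cases := map[string]string{
		"http://localhost:2379": "localhost:2379", // everything after "://"
		"unix:etcd.sock":        "etcd.sock",      // bare "unix:" prefix stripped
		"localhost:2379":        "localhost:2379", // no scheme, returned unchanged
	}
	for ep, want := range cases {
		if got := authority(ep); got != want {
			t.Errorf("authority(%q) = %q, want %q", ep, got, want)
		}
	}
}
```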
func (c *Client) credentialsForEndpoint(ep string) grpccredentials.TransportCredentials {
r := endpoint.RequiresCredentials(ep)
switch r {


@@ -68,7 +68,8 @@ func (s *syncer) SyncBase(ctx context.Context) (<-chan clientv3.GetResponse, cha
var key string
opts := []clientv3.OpOption{clientv3.WithLimit(batchLimit), clientv3.WithRev(s.rev)}
opts := []clientv3.OpOption{clientv3.WithLimit(batchLimit), clientv3.WithRev(s.rev),
clientv3.WithSort(clientv3.SortByKey, clientv3.SortAscend)}
if len(s.prefix) == 0 {
// If len(s.prefix) == 0, we will sync the entire key-value space.
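
For context, a standalone range call with the explicit sort options this hunk adds would look roughly like the sketch below (function name, prefix, and limit are placeholders; import path assumed to be the etcd v3 client):

```go
import (
	"context"

	clientv3 "go.etcd.io/etcd/client/v3"
)

// Hypothetical standalone equivalent of the syncer's batched range;
// the explicit key-ascending sort makes paging order deterministic.
func rangeSorted(ctx context.Context, cli *clientv3.Client, rev int64) (*clientv3.GetResponse, error) {
	return cli.Get(ctx, "some-prefix",
		clientv3.WithPrefix(),
		clientv3.WithLimit(1000),
		clientv3.WithRev(rev),
		clientv3.WithSort(clientv3.SortByKey, clientv3.SortAscend),
	)
}
```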


@@ -118,7 +118,7 @@ func HandleBackup(withV3 bool, srcDir string, destDir string, srcWAL string, des
destWAL = datadir.ToWalDir(destDir)
}
if err := fileutil.CreateDirAll(destSnap); err != nil {
if err := fileutil.CreateDirAll(lg, destSnap); err != nil {
lg.Fatal("failed creating backup snapshot dir", zap.String("dest-snap", destSnap), zap.Error(err))
}


@@ -333,7 +333,7 @@ func (s *v3Manager) copyAndVerifyDB() error {
return err
}
if err := fileutil.CreateDirAll(s.snapDir); err != nil {
if err := fileutil.CreateDirAll(s.lg, s.snapDir); err != nil {
return err
}
@@ -394,7 +394,7 @@ func (s *v3Manager) copyAndVerifyDB() error {
//
// TODO: This code ignores learners !!!
func (s *v3Manager) saveWALAndSnap() (*raftpb.HardState, error) {
if err := fileutil.CreateDirAll(s.walDir); err != nil {
if err := fileutil.CreateDirAll(s.lg, s.walDir); err != nil {
return nil, err
}


@@ -0,0 +1,69 @@
// Copyright 2021 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package grpc_testing
import (
"context"
"sync"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
)
type GrpcRecorder struct {
mux sync.RWMutex
requests []RequestInfo
}
type RequestInfo struct {
FullMethod string
Authority string
}
func (ri *GrpcRecorder) UnaryInterceptor() grpc.UnaryServerInterceptor {
return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
ri.record(toRequestInfo(ctx, info))
resp, err := handler(ctx, req)
return resp, err
}
}
func (ri *GrpcRecorder) RecordedRequests() []RequestInfo {
ri.mux.RLock()
defer ri.mux.RUnlock()
reqs := make([]RequestInfo, len(ri.requests))
copy(reqs, ri.requests)
return reqs
}
func toRequestInfo(ctx context.Context, info *grpc.UnaryServerInfo) RequestInfo {
req := RequestInfo{
FullMethod: info.FullMethod,
}
md, ok := metadata.FromIncomingContext(ctx)
if ok {
as := md.Get(":authority")
if len(as) != 0 {
req.Authority = as[0]
}
}
return req
}
func (ri *GrpcRecorder) record(r RequestInfo) {
ri.mux.Lock()
defer ri.mux.Unlock()
ri.requests = append(ri.requests, r)
}
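
A minimal sketch of how this recorder might be attached to a test server (hypothetical wiring in the same package, not part of this diff):

```go
// Hypothetical usage: install the interceptor, drive some RPCs, then
// assert on what was captured via RecordedRequests().
func newRecordedServer() (*grpc.Server, *GrpcRecorder) {
	rec := &GrpcRecorder{}
	srv := grpc.NewServer(grpc.UnaryInterceptor(rec.UnaryInterceptor()))
	// ...register services, serve, issue requests...
	// then inspect rec.RecordedRequests()[i].FullMethod / .Authority
	return srv, rec
}
```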


@@ -148,20 +148,31 @@ func urlsEqual(ctx context.Context, lg *zap.Logger, a []url.URL, b []url.URL) (b
if len(a) != len(b) {
return false, fmt.Errorf("len(%q) != len(%q)", urlsToStrings(a), urlsToStrings(b))
}
sort.Sort(types.URLs(a))
sort.Sort(types.URLs(b))
var needResolve bool
for i := range a {
if !reflect.DeepEqual(a[i], b[i]) {
needResolve = true
break
}
}
if !needResolve {
return true, nil
}
// If URLs are not equal, try to resolve it and compare again.
urls, err := resolveTCPAddrs(ctx, lg, [][]url.URL{a, b})
if err != nil {
return false, err
}
preva, prevb := a, b
a, b = urls[0], urls[1]
sort.Sort(types.URLs(a))
sort.Sort(types.URLs(b))
for i := range a {
if !reflect.DeepEqual(a[i], b[i]) {
return false, fmt.Errorf("%q(resolved from %q) != %q(resolved from %q)",
a[i].String(), preva[i].String(),
b[i].String(), prevb[i].String(),
)
return false, fmt.Errorf("resolved urls: %q != %q", a[i].String(), b[i].String())
}
}
return true, nil
@@ -174,21 +185,13 @@ func URLStringsEqual(ctx context.Context, lg *zap.Logger, a []string, b []string
if len(a) != len(b) {
return false, fmt.Errorf("len(%q) != len(%q)", a, b)
}
urlsA := make([]url.URL, 0)
for _, str := range a {
u, err := url.Parse(str)
if err != nil {
return false, fmt.Errorf("failed to parse %q", str)
}
urlsA = append(urlsA, *u)
urlsA, err := stringsToURLs(a)
if err != nil {
return false, err
}
urlsB := make([]url.URL, 0)
for _, str := range b {
u, err := url.Parse(str)
if err != nil {
return false, fmt.Errorf("failed to parse %q", str)
}
urlsB = append(urlsB, *u)
urlsB, err := stringsToURLs(b)
if err != nil {
return false, err
}
if lg == nil {
lg, _ = zap.NewProduction()
@@ -207,6 +210,18 @@ func urlsToStrings(us []url.URL) []string {
return rs
}
func stringsToURLs(us []string) ([]url.URL, error) {
urls := make([]url.URL, 0, len(us))
for _, str := range us {
u, err := url.Parse(str)
if err != nil {
return nil, fmt.Errorf("failed to parse string to URL: %q", str)
}
urls = append(urls, *u)
}
return urls, nil
}
func IsNetworkTimeoutError(err error) bool {
nerr, ok := err.(net.Error)
return ok && nerr.Timeout()


@@ -17,6 +17,7 @@ package netutil
import (
"context"
"errors"
"fmt"
"net"
"net/url"
"reflect"
@@ -166,113 +167,133 @@ func TestURLsEqual(t *testing.T) {
}
tests := []struct {
n int
a []url.URL
b []url.URL
expect bool
err error
}{
{
n: 0,
a: []url.URL{{Scheme: "http", Host: "127.0.0.1:2379"}},
b: []url.URL{{Scheme: "http", Host: "127.0.0.1:2379"}},
expect: true,
},
{
n: 1,
a: []url.URL{{Scheme: "http", Host: "example.com:2379"}},
b: []url.URL{{Scheme: "http", Host: "10.0.10.1:2379"}},
expect: true,
},
{
n: 2,
a: []url.URL{{Scheme: "http", Host: "example.com:2379"}},
b: []url.URL{{Scheme: "https", Host: "10.0.10.1:2379"}},
expect: false,
err: errors.New(`"http://10.0.10.1:2379"(resolved from "http://example.com:2379") != "https://10.0.10.1:2379"(resolved from "https://10.0.10.1:2379")`),
err: errors.New(`resolved urls: "http://10.0.10.1:2379" != "https://10.0.10.1:2379"`),
},
{
n: 3,
a: []url.URL{{Scheme: "https", Host: "example.com:2379"}},
b: []url.URL{{Scheme: "http", Host: "10.0.10.1:2379"}},
expect: false,
err: errors.New(`"https://10.0.10.1:2379"(resolved from "https://example.com:2379") != "http://10.0.10.1:2379"(resolved from "http://10.0.10.1:2379")`),
err: errors.New(`resolved urls: "https://10.0.10.1:2379" != "http://10.0.10.1:2379"`),
},
{
n: 4,
a: []url.URL{{Scheme: "unix", Host: "abc:2379"}},
b: []url.URL{{Scheme: "unix", Host: "abc:2379"}},
expect: true,
},
{
n: 5,
a: []url.URL{{Scheme: "http", Host: "127.0.0.1:2379"}, {Scheme: "http", Host: "127.0.0.1:2380"}},
b: []url.URL{{Scheme: "http", Host: "127.0.0.1:2379"}, {Scheme: "http", Host: "127.0.0.1:2380"}},
expect: true,
},
{
n: 6,
a: []url.URL{{Scheme: "http", Host: "example.com:2379"}, {Scheme: "http", Host: "127.0.0.1:2380"}},
b: []url.URL{{Scheme: "http", Host: "example.com:2379"}, {Scheme: "http", Host: "127.0.0.1:2380"}},
expect: true,
},
{
n: 7,
a: []url.URL{{Scheme: "http", Host: "10.0.10.1:2379"}, {Scheme: "http", Host: "127.0.0.1:2380"}},
b: []url.URL{{Scheme: "http", Host: "example.com:2379"}, {Scheme: "http", Host: "127.0.0.1:2380"}},
expect: true,
},
{
n: 8,
a: []url.URL{{Scheme: "http", Host: "127.0.0.1:2379"}},
b: []url.URL{{Scheme: "http", Host: "127.0.0.1:2380"}},
expect: false,
err: errors.New(`"http://127.0.0.1:2379"(resolved from "http://127.0.0.1:2379") != "http://127.0.0.1:2380"(resolved from "http://127.0.0.1:2380")`),
err: errors.New(`resolved urls: "http://127.0.0.1:2379" != "http://127.0.0.1:2380"`),
},
{
n: 9,
a: []url.URL{{Scheme: "http", Host: "example.com:2380"}},
b: []url.URL{{Scheme: "http", Host: "10.0.10.1:2379"}},
expect: false,
err: errors.New(`"http://10.0.10.1:2380"(resolved from "http://example.com:2380") != "http://10.0.10.1:2379"(resolved from "http://10.0.10.1:2379")`),
err: errors.New(`resolved urls: "http://10.0.10.1:2380" != "http://10.0.10.1:2379"`),
},
{
n: 10,
a: []url.URL{{Scheme: "http", Host: "127.0.0.1:2379"}},
b: []url.URL{{Scheme: "http", Host: "10.0.0.1:2379"}},
expect: false,
err: errors.New(`"http://127.0.0.1:2379"(resolved from "http://127.0.0.1:2379") != "http://10.0.0.1:2379"(resolved from "http://10.0.0.1:2379")`),
err: errors.New(`resolved urls: "http://127.0.0.1:2379" != "http://10.0.0.1:2379"`),
},
{
n: 11,
a: []url.URL{{Scheme: "http", Host: "example.com:2379"}},
b: []url.URL{{Scheme: "http", Host: "10.0.0.1:2379"}},
expect: false,
err: errors.New(`"http://10.0.10.1:2379"(resolved from "http://example.com:2379") != "http://10.0.0.1:2379"(resolved from "http://10.0.0.1:2379")`),
err: errors.New(`resolved urls: "http://10.0.10.1:2379" != "http://10.0.0.1:2379"`),
},
{
n: 12,
a: []url.URL{{Scheme: "http", Host: "127.0.0.1:2379"}, {Scheme: "http", Host: "127.0.0.1:2380"}},
b: []url.URL{{Scheme: "http", Host: "127.0.0.1:2380"}, {Scheme: "http", Host: "127.0.0.1:2380"}},
expect: false,
err: errors.New(`"http://127.0.0.1:2379"(resolved from "http://127.0.0.1:2379") != "http://127.0.0.1:2380"(resolved from "http://127.0.0.1:2380")`),
err: errors.New(`resolved urls: "http://127.0.0.1:2379" != "http://127.0.0.1:2380"`),
},
{
n: 13,
a: []url.URL{{Scheme: "http", Host: "example.com:2379"}, {Scheme: "http", Host: "127.0.0.1:2380"}},
b: []url.URL{{Scheme: "http", Host: "127.0.0.1:2380"}, {Scheme: "http", Host: "127.0.0.1:2380"}},
expect: false,
err: errors.New(`"http://10.0.10.1:2379"(resolved from "http://example.com:2379") != "http://127.0.0.1:2380"(resolved from "http://127.0.0.1:2380")`),
err: errors.New(`resolved urls: "http://10.0.10.1:2379" != "http://127.0.0.1:2380"`),
},
{
n: 14,
a: []url.URL{{Scheme: "http", Host: "127.0.0.1:2379"}, {Scheme: "http", Host: "127.0.0.1:2380"}},
b: []url.URL{{Scheme: "http", Host: "10.0.0.1:2379"}, {Scheme: "http", Host: "127.0.0.1:2380"}},
expect: false,
err: errors.New(`"http://127.0.0.1:2379"(resolved from "http://127.0.0.1:2379") != "http://10.0.0.1:2379"(resolved from "http://10.0.0.1:2379")`),
err: errors.New(`resolved urls: "http://127.0.0.1:2379" != "http://10.0.0.1:2379"`),
},
{
n: 15,
a: []url.URL{{Scheme: "http", Host: "example.com:2379"}, {Scheme: "http", Host: "127.0.0.1:2380"}},
b: []url.URL{{Scheme: "http", Host: "10.0.0.1:2379"}, {Scheme: "http", Host: "127.0.0.1:2380"}},
expect: false,
err: errors.New(`"http://10.0.10.1:2379"(resolved from "http://example.com:2379") != "http://10.0.0.1:2379"(resolved from "http://10.0.0.1:2379")`),
err: errors.New(`resolved urls: "http://10.0.10.1:2379" != "http://10.0.0.1:2379"`),
},
{
n: 16,
a: []url.URL{{Scheme: "http", Host: "10.0.0.1:2379"}},
b: []url.URL{{Scheme: "http", Host: "10.0.0.1:2379"}, {Scheme: "http", Host: "127.0.0.1:2380"}},
expect: false,
err: errors.New(`len(["http://10.0.0.1:2379"]) != len(["http://10.0.0.1:2379" "http://127.0.0.1:2380"])`),
},
{
n: 17,
a: []url.URL{{Scheme: "http", Host: "first.com:2379"}, {Scheme: "http", Host: "second.com:2380"}},
b: []url.URL{{Scheme: "http", Host: "10.0.11.1:2379"}, {Scheme: "http", Host: "10.0.11.2:2380"}},
expect: true,
},
{
n: 18,
a: []url.URL{{Scheme: "http", Host: "second.com:2380"}, {Scheme: "http", Host: "first.com:2379"}},
b: []url.URL{{Scheme: "http", Host: "10.0.11.1:2379"}, {Scheme: "http", Host: "10.0.11.2:2380"}},
expect: true,
@@ -282,21 +303,43 @@ func TestURLsEqual(t *testing.T) {
for i, test := range tests {
result, err := urlsEqual(context.TODO(), zap.NewExample(), test.a, test.b)
if result != test.expect {
t.Errorf("#%d: a:%v b:%v, expected %v but %v", i, test.a, test.b, test.expect, result)
t.Errorf("idx=%d #%d: a:%v b:%v, expected %v but %v", i, test.n, test.a, test.b, test.expect, result)
}
if test.err != nil {
if err.Error() != test.err.Error() {
t.Errorf("#%d: err expected %v but %v", i, test.err, err)
t.Errorf("idx=%d #%d: err expected %v but %v", i, test.n, test.err, err)
}
}
}
}
func TestURLStringsEqual(t *testing.T) {
result, err := URLStringsEqual(context.TODO(), zap.NewExample(), []string{"http://127.0.0.1:8080"}, []string{"http://127.0.0.1:8080"})
if !result {
t.Errorf("unexpected result %v", result)
defer func() { resolveTCPAddr = resolveTCPAddrDefault }()
errOnResolve := func(ctx context.Context, addr string) (*net.TCPAddr, error) {
return nil, fmt.Errorf("unexpected attempt to resolve: %q", addr)
}
if err != nil {
t.Errorf("unexpected error %v", err)
cases := []struct {
urlsA []string
urlsB []string
resolver func(ctx context.Context, addr string) (*net.TCPAddr, error)
}{
{[]string{"http://127.0.0.1:8080"}, []string{"http://127.0.0.1:8080"}, resolveTCPAddrDefault},
{[]string{
"http://host1:8080",
"http://host2:8080",
}, []string{
"http://host1:8080",
"http://host2:8080",
}, errOnResolve},
}
for idx, c := range cases {
t.Logf("TestURLStringsEqual, case #%d", idx)
resolveTCPAddr = c.resolver
result, err := URLStringsEqual(context.TODO(), zap.NewExample(), c.urlsA, c.urlsB)
if !result {
t.Errorf("unexpected result %v", result)
}
if err != nil {
t.Errorf("unexpected error %v", err)
}
}
}


@@ -539,7 +539,7 @@ func (e *Etcd) servePeers() (err error) {
for _, p := range e.Peers {
u := p.Listener.Addr().String()
gs := v3rpc.Server(e.Server, peerTLScfg)
gs := v3rpc.Server(e.Server, peerTLScfg, nil)
m := cmux.New(p.Listener)
go gs.Serve(m.Match(cmux.HTTP2()))
srv := &http.Server{


@@ -110,7 +110,7 @@ func (sctx *serveCtx) serve(
}()
if sctx.insecure {
gs = v3rpc.Server(s, nil, gopts...)
gs = v3rpc.Server(s, nil, nil, gopts...)
v3electionpb.RegisterElectionServer(gs, servElection)
v3lockpb.RegisterLockServer(gs, servLock)
if sctx.serviceRegister != nil {
@@ -148,7 +148,7 @@ func (sctx *serveCtx) serve(
if tlsErr != nil {
return tlsErr
}
gs = v3rpc.Server(s, tlscfg, gopts...)
gs = v3rpc.Server(s, tlscfg, nil, gopts...)
v3electionpb.RegisterElectionServer(gs, servElection)
v3lockpb.RegisterLockServer(gs, servLock)
if sctx.serviceRegister != nil {


@@ -275,7 +275,7 @@ func startProxy(cfg *config) error {
}
cfg.ec.Dir = filepath.Join(cfg.ec.Dir, "proxy")
err = fileutil.TouchDirAll(cfg.ec.Dir)
err = fileutil.TouchDirAll(lg, cfg.ec.Dir)
if err != nil {
return err
}


@@ -250,30 +250,16 @@ func (p *peer) send(m raftpb.Message) {
if isMsgSnap(m) {
p.r.ReportSnapshot(m.To, raft.SnapshotFailure)
}
if p.status.isActive() {
if p.lg != nil {
p.lg.Warn(
"dropped internal Raft message since sending buffer is full (overloaded network)",
zap.String("message-type", m.Type.String()),
zap.String("local-member-id", p.localID.String()),
zap.String("from", types.ID(m.From).String()),
zap.String("remote-peer-id", p.id.String()),
zap.String("remote-peer-name", name),
zap.Bool("remote-peer-active", p.status.isActive()),
)
}
} else {
if p.lg != nil {
p.lg.Warn(
"dropped internal Raft message since sending buffer is full (overloaded network)",
zap.String("message-type", m.Type.String()),
zap.String("local-member-id", p.localID.String()),
zap.String("from", types.ID(m.From).String()),
zap.String("remote-peer-id", p.id.String()),
zap.String("remote-peer-name", name),
zap.Bool("remote-peer-active", p.status.isActive()),
)
}
if p.lg != nil {
p.lg.Warn(
"dropped internal Raft message since sending buffer is full",
zap.String("message-type", m.Type.String()),
zap.String("local-member-id", p.localID.String()),
zap.String("from", types.ID(m.From).String()),
zap.String("remote-peer-id", p.id.String()),
zap.String("remote-peer-name", name),
zap.Bool("remote-peer-active", p.status.isActive()),
)
}
sentFailures.WithLabelValues(types.ID(m.To).String()).Inc()
}


@@ -36,19 +36,21 @@ const (
maxSendBytes = math.MaxInt32
)
func Server(s *etcdserver.EtcdServer, tls *tls.Config, gopts ...grpc.ServerOption) *grpc.Server {
func Server(s *etcdserver.EtcdServer, tls *tls.Config, interceptor grpc.UnaryServerInterceptor, gopts ...grpc.ServerOption) *grpc.Server {
var opts []grpc.ServerOption
opts = append(opts, grpc.CustomCodec(&codec{}))
if tls != nil {
bundle := credentials.NewBundle(credentials.Config{TLSConfig: tls})
opts = append(opts, grpc.Creds(bundle.TransportCredentials()))
}
chainUnaryInterceptors := []grpc.UnaryServerInterceptor{
newLogUnaryInterceptor(s),
newUnaryInterceptor(s),
grpc_prometheus.UnaryServerInterceptor,
}
if interceptor != nil {
chainUnaryInterceptors = append(chainUnaryInterceptors, interceptor)
}
chainStreamInterceptors := []grpc.StreamServerInterceptor{
newStreamInterceptor(s),
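
The new third parameter lets tests inject an interceptor into the unary chain while production call sites (serve.go and etcd.go above) pass nil. A hedged sketch of test wiring using the GrpcRecorder from this diff (helper name and import paths are assumptions):

```go
import (
	"crypto/tls"

	"go.etcd.io/etcd/pkg/v3/grpc_testing"
	"go.etcd.io/etcd/server/v3/etcdserver"
	"go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc"
	"google.golang.org/grpc"
)

// Hypothetical test helper: record every unary request's method and
// :authority while the server otherwise behaves as in production.
func newRecordedEtcdServer(s *etcdserver.EtcdServer, tlscfg *tls.Config) (*grpc.Server, *grpc_testing.GrpcRecorder) {
	rec := &grpc_testing.GrpcRecorder{}
	return v3rpc.Server(s, tlscfg, rec.UnaryInterceptor()), rec
}
```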


@@ -76,7 +76,7 @@ func newLogUnaryInterceptor(s *etcdserver.EtcdServer) grpc.UnaryServerIntercepto
startTime := time.Now()
resp, err := handler(ctx, req)
lg := s.Logger()
if lg != nil { // acquire stats if debug level is enabled or request is expensive
if lg != nil { // acquire stats if debug level is enabled or RequestInfo is expensive
defer logUnaryRequestStats(ctx, lg, s.Cfg.WarningUnaryRequestDuration, info, startTime, req, resp)
}
return resp, err


@@ -387,6 +387,11 @@ func (a *applierV3backend) Range(ctx context.Context, txn mvcc.TxnRead, r *pb.Ra
// sorted by keys in lexicographically ascending order,
// sort ASCEND by default only when target is not 'KEY'
sortOrder = pb.RangeRequest_ASCEND
} else if r.SortTarget == pb.RangeRequest_KEY && sortOrder == pb.RangeRequest_ASCEND {
// Since current mvcc.Range implementation returns results
// sorted by keys in lexicographically ascending order,
// don't re-sort when target is 'KEY' and order is ASCEND
sortOrder = pb.RangeRequest_NONE
}
if sortOrder != pb.RangeRequest_NONE {
var sorter sort.Interface


@@ -49,7 +49,6 @@ import (
)
func bootstrap(cfg config.ServerConfig) (b *bootstrappedServer, err error) {
st := v2store.New(StoreClusterPrefix, StoreKeysPrefix)
if cfg.MaxRequestBytes > recommendedMaxRequestBytes {
cfg.Logger.Warn(
@@ -61,68 +60,126 @@ func bootstrap(cfg config.ServerConfig) (b *bootstrappedServer, err error) {
)
}
if terr := fileutil.TouchDirAll(cfg.DataDir); terr != nil {
if terr := fileutil.TouchDirAll(cfg.Logger, cfg.DataDir); terr != nil {
return nil, fmt.Errorf("cannot access data directory: %v", terr)
}
haveWAL := wal.Exist(cfg.WALDir())
ss := bootstrapSnapshot(cfg)
be, ci, beExist, beHooks, err := bootstrapBackend(cfg)
if err != nil {
return nil, err
if terr := fileutil.TouchDirAll(cfg.Logger, cfg.MemberDir()); terr != nil {
return nil, fmt.Errorf("cannot access member directory: %v", terr)
}
defer func() {
if err != nil {
be.Close()
}
}()
ss := bootstrapSnapshot(cfg)
prt, err := rafthttp.NewRoundTripper(cfg.PeerTLSInfo, cfg.PeerDialTimeout())
if err != nil {
return nil, err
}
switch {
case !haveWAL && !cfg.NewCluster:
b, err = bootstrapExistingClusterNoWAL(cfg, prt, st, be)
case !haveWAL && cfg.NewCluster:
b, err = bootstrapNewClusterNoWAL(cfg, prt, st, be)
case haveWAL:
b, err = bootstrapWithWAL(cfg, st, be, ss, beExist, beHooks, ci)
default:
be.Close()
return nil, fmt.Errorf("unsupported bootstrap config")
}
haveWAL := wal.Exist(cfg.WALDir())
st := v2store.New(StoreClusterPrefix, StoreKeysPrefix)
backend, err := bootstrapBackend(cfg, haveWAL, st, ss)
if err != nil {
return nil, err
}
var (
bwal *bootstrappedWAL
)
if terr := fileutil.TouchDirAll(cfg.MemberDir()); terr != nil {
return nil, fmt.Errorf("cannot access member directory: %v", terr)
if haveWAL {
if err = fileutil.IsDirWriteable(cfg.WALDir()); err != nil {
return nil, fmt.Errorf("cannot write to WAL directory: %v", err)
}
bwal = bootstrapWALFromSnapshot(cfg, backend.snapshot)
}
b.prt = prt
b.ci = ci
b.st = st
b.be = be
b.ss = ss
b.beHooks = beHooks
return b, nil
cluster, err := bootstrapCluster(cfg, bwal, prt)
if err != nil {
backend.Close()
return nil, err
}
s, err := bootstrapStorage(cfg, st, backend, bwal, cluster)
if err != nil {
backend.Close()
return nil, err
}
err = cluster.Finalize(cfg, s)
if err != nil {
backend.Close()
return nil, err
}
raft := bootstrapRaft(cfg, cluster, s.wal)
return &bootstrappedServer{
prt: prt,
ss: ss,
storage: s,
cluster: cluster,
raft: raft,
}, nil
}
type bootstrappedServer struct {
storage *bootstrappedStorage
cluster *bootstrapedCluster
raft *bootstrappedRaft
remotes []*membership.Member
prt http.RoundTripper
ci cindex.ConsistentIndexer
st v2store.Store
be backend.Backend
ss *snap.Snapshotter
beHooks *serverstorage.BackendHooks
}
func (s *bootstrappedServer) Close() {
s.storage.Close()
}
type bootstrappedStorage struct {
backend *bootstrappedBackend
wal *bootstrappedWAL
st v2store.Store
}
func (s *bootstrappedStorage) Close() {
s.backend.Close()
}
type bootstrappedBackend struct {
beHooks *serverstorage.BackendHooks
be backend.Backend
ci cindex.ConsistentIndexer
beExist bool
snapshot *raftpb.Snapshot
}
func (s *bootstrappedBackend) Close() {
s.be.Close()
}
type bootstrapedCluster struct {
remotes []*membership.Member
cl *membership.RaftCluster
nodeID types.ID
}
type bootstrappedRaft struct {
lg *zap.Logger
heartbeat time.Duration
peers []raft.Peer
config *raft.Config
storage *raft.MemoryStorage
}
func bootstrapStorage(cfg config.ServerConfig, st v2store.Store, be *bootstrappedBackend, wal *bootstrappedWAL, cl *bootstrapedCluster) (b *bootstrappedStorage, err error) {
if wal == nil {
wal = bootstrapNewWAL(cfg, cl)
}
return &bootstrappedStorage{
backend: be,
st: st,
wal: wal,
}, nil
}
func bootstrapSnapshot(cfg config.ServerConfig) *snap.Snapshotter {
if err := fileutil.TouchDirAll(cfg.SnapDir()); err != nil {
if err := fileutil.TouchDirAll(cfg.Logger, cfg.SnapDir()); err != nil {
cfg.Logger.Fatal(
"failed to create snapshot directory",
zap.String("path", cfg.SnapDir()),
@@ -142,11 +199,11 @@ func bootstrapSnapshot(cfg config.ServerConfig) *snap.Snapshotter {
return snap.New(cfg.Logger, cfg.SnapDir())
}
func bootstrapBackend(cfg config.ServerConfig) (be backend.Backend, ci cindex.ConsistentIndexer, beExist bool, beHooks *serverstorage.BackendHooks, err error) {
beExist = fileutil.Exist(cfg.BackendPath())
ci = cindex.NewConsistentIndex(nil)
beHooks = serverstorage.NewBackendHooks(cfg.Logger, ci)
be = serverstorage.OpenBackend(cfg, beHooks)
func bootstrapBackend(cfg config.ServerConfig, haveWAL bool, st v2store.Store, ss *snap.Snapshotter) (backend *bootstrappedBackend, err error) {
beExist := fileutil.Exist(cfg.BackendPath())
ci := cindex.NewConsistentIndex(nil)
beHooks := serverstorage.NewBackendHooks(cfg.Logger, ci)
be := serverstorage.OpenBackend(cfg, beHooks)
defer func() {
if err != nil && be != nil {
be.Close()
@@ -157,20 +214,35 @@ func bootstrapBackend(cfg config.ServerConfig) (be backend.Backend, ci cindex.Co
if cfg.ExperimentalBootstrapDefragThresholdMegabytes != 0 {
err = maybeDefragBackend(cfg, be)
if err != nil {
return nil, nil, false, nil, err
return nil, err
}
}
cfg.Logger.Debug("restore consistentIndex", zap.Uint64("index", ci.ConsistentIndex()))
// TODO(serathius): Implement schema setup in fresh storage
var (
snapshot *raftpb.Snapshot
)
if haveWAL {
snapshot, be, err = recoverSnapshot(cfg, st, be, beExist, beHooks, ci, ss)
if err != nil {
return nil, err
}
}
if beExist {
err = schema.Validate(cfg.Logger, be.BatchTx())
if err != nil {
cfg.Logger.Error("Failed to validate schema", zap.Error(err))
return nil, nil, false, nil, err
return nil, err
}
}
return be, ci, beExist, beHooks, nil
return &bootstrappedBackend{
beHooks: beHooks,
be: be,
ci: ci,
beExist: beExist,
snapshot: snapshot,
}, nil
}
func maybeDefragBackend(cfg config.ServerConfig, be backend.Backend) error {
@@ -192,7 +264,24 @@ func maybeDefragBackend(cfg config.ServerConfig, be backend.Backend) error {
return be.Defrag()
}
func bootstrapExistingClusterNoWAL(cfg config.ServerConfig, prt http.RoundTripper, st v2store.Store, be backend.Backend) (*bootstrappedServer, error) {
func bootstrapCluster(cfg config.ServerConfig, bwal *bootstrappedWAL, prt http.RoundTripper) (c *bootstrapedCluster, err error) {
switch {
case bwal == nil && !cfg.NewCluster:
c, err = bootstrapExistingClusterNoWAL(cfg, prt)
case bwal == nil && cfg.NewCluster:
c, err = bootstrapNewClusterNoWAL(cfg, prt)
case bwal != nil && bwal.haveWAL:
c, err = bootstrapClusterWithWAL(cfg, bwal.meta)
default:
return nil, fmt.Errorf("unsupported bootstrap config")
}
if err != nil {
return nil, err
}
return c, nil
}
func bootstrapExistingClusterNoWAL(cfg config.ServerConfig, prt http.RoundTripper) (*bootstrapedCluster, error) {
if err := cfg.VerifyJoinExisting(); err != nil {
return nil, err
}
@@ -213,17 +302,15 @@ func bootstrapExistingClusterNoWAL(cfg config.ServerConfig, prt http.RoundTrippe
remotes := existingCluster.Members()
cl.SetID(types.ID(0), existingCluster.ID())
cl.SetStore(st)
cl.SetBackend(schema.NewMembershipBackend(cfg.Logger, be))
br := bootstrapRaftFromCluster(cfg, cl, nil)
cl.SetID(br.wal.id, existingCluster.ID())
return &bootstrappedServer{
raft: br,
member := cl.MemberByName(cfg.Name)
return &bootstrapedCluster{
remotes: remotes,
cl: cl,
nodeID: member.ID,
}, nil
}
func bootstrapNewClusterNoWAL(cfg config.ServerConfig, prt http.RoundTripper, st v2store.Store, be backend.Backend) (*bootstrappedServer, error) {
func bootstrapNewClusterNoWAL(cfg config.ServerConfig, prt http.RoundTripper) (*bootstrapedCluster, error) {
if err := cfg.VerifyBootstrap(); err != nil {
return nil, err
}
@@ -253,42 +340,43 @@ func bootstrapNewClusterNoWAL(cfg config.ServerConfig, prt http.RoundTripper, st
return nil, err
}
}
cl.SetStore(st)
cl.SetBackend(schema.NewMembershipBackend(cfg.Logger, be))
br := bootstrapRaftFromCluster(cfg, cl, cl.MemberIDs())
cl.SetID(br.wal.id, cl.ID())
return &bootstrappedServer{
return &bootstrapedCluster{
remotes: nil,
raft: br,
cl: cl,
nodeID: m.ID,
}, nil
}
func bootstrapWithWAL(cfg config.ServerConfig, st v2store.Store, be backend.Backend, ss *snap.Snapshotter, beExist bool, beHooks *serverstorage.BackendHooks, ci cindex.ConsistentIndexer) (*bootstrappedServer, error) {
func bootstrapClusterWithWAL(cfg config.ServerConfig, meta *snapshotMetadata) (*bootstrapedCluster, error) {
if err := fileutil.IsDirWriteable(cfg.MemberDir()); err != nil {
return nil, fmt.Errorf("cannot write to member directory: %v", err)
}
if err := fileutil.IsDirWriteable(cfg.WALDir()); err != nil {
return nil, fmt.Errorf("cannot write to WAL directory: %v", err)
}
if cfg.ShouldDiscover() {
cfg.Logger.Warn(
"discovery token is ignored since cluster already initialized; valid logs are found",
zap.String("wal-dir", cfg.WALDir()),
)
}
cl := membership.NewCluster(cfg.Logger)
cl.SetID(meta.nodeID, meta.clusterID)
return &bootstrapedCluster{
cl: cl,
nodeID: meta.nodeID,
}, nil
}
func recoverSnapshot(cfg config.ServerConfig, st v2store.Store, be backend.Backend, beExist bool, beHooks *serverstorage.BackendHooks, ci cindex.ConsistentIndexer, ss *snap.Snapshotter) (*raftpb.Snapshot, backend.Backend, error) {
// Find a snapshot to start/restart a raft node
walSnaps, err := wal.ValidSnapshotEntries(cfg.Logger, cfg.WALDir())
if err != nil {
return nil, err
return nil, be, err
}
// snapshot files can be orphaned if etcd crashes after writing them but before writing the corresponding
// wal log entries
// bwal log entries
snapshot, err := ss.LoadNewestAvailable(walSnaps)
if err != nil && err != snap.ErrNoSnapshot {
return nil, err
return nil, be, err
}
if snapshot != nil {
@@ -298,7 +386,7 @@ func bootstrapWithWAL(cfg config.ServerConfig, st v2store.Store, be backend.Back
if err = serverstorage.AssertNoV2StoreContent(cfg.Logger, st, cfg.V2Deprecation); err != nil {
cfg.Logger.Error("illegal v2store content", zap.Error(err))
return nil, err
return nil, be, err
}
cfg.Logger.Info(
@@ -324,7 +412,7 @@ func bootstrapWithWAL(cfg config.ServerConfig, st v2store.Store, be backend.Back
kvindex := ci.ConsistentIndex()
if kvindex < snapshot.Metadata.Index {
if kvindex != 0 {
return nil, fmt.Errorf("database file (%v index %d) does not match with snapshot (index %d)", cfg.BackendPath(), kvindex, snapshot.Metadata.Index)
return nil, be, fmt.Errorf("database file (%v index %d) does not match with snapshot (index %d)", cfg.BackendPath(), kvindex, snapshot.Metadata.Index)
}
cfg.Logger.Warn(
"consistent index was never saved",
@@ -335,29 +423,47 @@ func bootstrapWithWAL(cfg config.ServerConfig, st v2store.Store, be backend.Back
} else {
cfg.Logger.Info("No snapshot found. Recovering WAL from scratch!")
}
r := &bootstrappedServer{}
if !cfg.ForceNewCluster {
r.raft = bootstrapRaftFromWal(cfg, snapshot)
} else {
r.raft = bootstrapRaftFromWalStandalone(cfg, snapshot)
}
r.raft.cl.SetStore(st)
r.raft.cl.SetBackend(schema.NewMembershipBackend(cfg.Logger, be))
r.raft.cl.Recover(api.UpdateCapability)
if r.raft.cl.Version() != nil && !r.raft.cl.Version().LessThan(semver.Version{Major: 3}) && !beExist {
bepath := cfg.BackendPath()
os.RemoveAll(bepath)
return nil, fmt.Errorf("database file (%v) of the backend is missing", bepath)
}
return r, nil
return snapshot, be, nil
}
func bootstrapRaftFromCluster(cfg config.ServerConfig, cl *membership.RaftCluster, ids []types.ID) *bootstrappedRaft {
func (c *bootstrapedCluster) Finalize(cfg config.ServerConfig, s *bootstrappedStorage) error {
if !s.wal.haveWAL {
c.cl.SetID(c.nodeID, c.cl.ID())
}
c.cl.SetStore(s.st)
c.cl.SetBackend(schema.NewMembershipBackend(cfg.Logger, s.backend.be))
if s.wal.haveWAL {
c.cl.Recover(api.UpdateCapability)
if c.databaseFileMissing(s) {
bepath := cfg.BackendPath()
os.RemoveAll(bepath)
return fmt.Errorf("database file (%v) of the backend is missing", bepath)
}
}
return nil
}
func (c *bootstrapedCluster) databaseFileMissing(s *bootstrappedStorage) bool {
v3Cluster := c.cl.Version() != nil && !c.cl.Version().LessThan(semver.Version{Major: 3})
return v3Cluster && !s.backend.beExist
}
func bootstrapRaft(cfg config.ServerConfig, cluster *bootstrapedCluster, bwal *bootstrappedWAL) *bootstrappedRaft {
switch {
case !bwal.haveWAL && !cfg.NewCluster:
return bootstrapRaftFromCluster(cfg, cluster.cl, nil, bwal)
case !bwal.haveWAL && cfg.NewCluster:
return bootstrapRaftFromCluster(cfg, cluster.cl, cluster.cl.MemberIDs(), bwal)
case bwal.haveWAL:
return bootstrapRaftFromWAL(cfg, bwal)
default:
cfg.Logger.Panic("unsupported bootstrap config")
return nil
}
}
func bootstrapRaftFromCluster(cfg config.ServerConfig, cl *membership.RaftCluster, ids []types.ID, bwal *bootstrappedWAL) *bootstrappedRaft {
member := cl.MemberByName(cfg.Name)
id := member.ID
wal := bootstrapNewWAL(cfg, id, cl.ID())
peers := make([]raft.Peer, len(ids))
for i, id := range ids {
var ctx []byte
@@ -369,69 +475,26 @@ func bootstrapRaftFromCluster(cfg config.ServerConfig, cl *membership.RaftCluste
}
cfg.Logger.Info(
"starting local member",
zap.String("local-member-id", id.String()),
zap.String("local-member-id", member.ID.String()),
zap.String("cluster-id", cl.ID().String()),
)
s := wal.MemoryStorage()
s := bwal.MemoryStorage()
return &bootstrappedRaft{
lg: cfg.Logger,
heartbeat: time.Duration(cfg.TickMs) * time.Millisecond,
cl: cl,
config: raftConfig(cfg, uint64(wal.id), s),
config: raftConfig(cfg, uint64(member.ID), s),
peers: peers,
storage: s,
wal: wal,
}
}
func bootstrapRaftFromWal(cfg config.ServerConfig, snapshot *raftpb.Snapshot) *bootstrappedRaft {
wal := bootstrapWALFromSnapshot(cfg.Logger, cfg.WALDir(), snapshot, cfg.UnsafeNoFsync)
cfg.Logger.Info(
"restarting local member",
zap.String("cluster-id", wal.cid.String()),
zap.String("local-member-id", wal.id.String()),
zap.Uint64("commit-index", wal.st.Commit),
)
cl := membership.NewCluster(cfg.Logger)
cl.SetID(wal.id, wal.cid)
s := wal.MemoryStorage()
func bootstrapRaftFromWAL(cfg config.ServerConfig, bwal *bootstrappedWAL) *bootstrappedRaft {
s := bwal.MemoryStorage()
return &bootstrappedRaft{
lg: cfg.Logger,
heartbeat: time.Duration(cfg.TickMs) * time.Millisecond,
cl: cl,
config: raftConfig(cfg, uint64(wal.id), s),
config: raftConfig(cfg, uint64(bwal.meta.nodeID), s),
storage: s,
wal: wal,
}
}
func bootstrapRaftFromWalStandalone(cfg config.ServerConfig, snapshot *raftpb.Snapshot) *bootstrappedRaft {
wal := bootstrapWALFromSnapshot(cfg.Logger, cfg.WALDir(), snapshot, cfg.UnsafeNoFsync)
// discard the previously uncommitted entries
wal.ents = wal.CommitedEntries()
entries := wal.ConfigChangeEntries()
// force commit config change entries
wal.AppendAndCommitEntries(entries)
cfg.Logger.Info(
"forcing restart member",
zap.String("cluster-id", wal.cid.String()),
zap.String("local-member-id", wal.id.String()),
zap.Uint64("commit-index", wal.st.Commit),
)
cl := membership.NewCluster(cfg.Logger)
cl.SetID(wal.id, wal.cid)
s := wal.MemoryStorage()
return &bootstrappedRaft{
lg: cfg.Logger,
heartbeat: time.Duration(cfg.TickMs) * time.Millisecond,
cl: cl,
config: raftConfig(cfg, uint64(wal.id), s),
storage: s,
wal: wal,
}
}
@@ -449,18 +512,7 @@ func raftConfig(cfg config.ServerConfig, id uint64, s *raft.MemoryStorage) *raft
}
}
type bootstrappedRaft struct {
lg *zap.Logger
heartbeat time.Duration
peers []raft.Peer
config *raft.Config
cl *membership.RaftCluster
storage *raft.MemoryStorage
wal *bootstrappedWAL
}
func (b *bootstrappedRaft) newRaftNode(ss *snap.Snapshotter) *raftNode {
func (b *bootstrappedRaft) newRaftNode(ss *snap.Snapshotter, wal *wal.WAL, cl *membership.RaftCluster) *raftNode {
var n raft.Node
if len(b.peers) == 0 {
n = raft.RestartNode(b.config)
@@ -473,30 +525,65 @@ func (b *bootstrappedRaft) newRaftNode(ss *snap.Snapshotter) *raftNode {
return newRaftNode(
raftNodeConfig{
lg: b.lg,
isIDRemoved: func(id uint64) bool { return b.cl.IsIDRemoved(types.ID(id)) },
isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) },
Node: n,
heartbeat: b.heartbeat,
raftStorage: b.storage,
storage: NewStorage(b.wal.w, ss),
storage: NewStorage(wal, ss),
},
)
}
// bootstrapWALFromSnapshot reads the WAL at the given snap and returns the wal, its latest HardState and cluster ID, and all entries that appear
func bootstrapWALFromSnapshot(cfg config.ServerConfig, snapshot *raftpb.Snapshot) *bootstrappedWAL {
wal, st, ents, snap, meta := openWALFromSnapshot(cfg, snapshot)
bwal := &bootstrappedWAL{
lg: cfg.Logger,
w: wal,
st: st,
ents: ents,
snapshot: snap,
meta: meta,
haveWAL: true,
}
if cfg.ForceNewCluster {
// discard the previously uncommitted entries
bwal.ents = bwal.CommitedEntries()
entries := bwal.NewConfigChangeEntries()
// force commit config change entries
bwal.AppendAndCommitEntries(entries)
cfg.Logger.Info(
"forcing restart member",
zap.String("cluster-id", meta.clusterID.String()),
zap.String("local-member-id", meta.nodeID.String()),
zap.Uint64("commit-index", bwal.st.Commit),
)
} else {
cfg.Logger.Info(
"restarting local member",
zap.String("cluster-id", meta.clusterID.String()),
zap.String("local-member-id", meta.nodeID.String()),
zap.Uint64("commit-index", bwal.st.Commit),
)
}
return bwal
}
// openWALFromSnapshot reads the WAL at the given snap and returns the wal, its latest HardState and cluster ID, and all entries that appear
// after the position of the given snap in the WAL.
// The snap must have been previously saved to the WAL, or this call will panic.
func bootstrapWALFromSnapshot(lg *zap.Logger, waldir string, snapshot *raftpb.Snapshot, unsafeNoFsync bool) *bootstrappedWAL {
func openWALFromSnapshot(cfg config.ServerConfig, snapshot *raftpb.Snapshot) (*wal.WAL, *raftpb.HardState, []raftpb.Entry, *raftpb.Snapshot, *snapshotMetadata) {
var walsnap walpb.Snapshot
if snapshot != nil {
walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
}
repaired := false
for {
w, err := wal.Open(lg, waldir, walsnap)
w, err := wal.Open(cfg.Logger, cfg.WALDir(), walsnap)
if err != nil {
lg.Fatal("failed to open WAL", zap.Error(err))
cfg.Logger.Fatal("failed to open WAL", zap.Error(err))
}
if unsafeNoFsync {
if cfg.UnsafeNoFsync {
w.SetUnsafeNoFsync()
}
wmetadata, st, ents, err := w.ReadAll()
@@ -504,12 +591,12 @@ func bootstrapWALFromSnapshot(lg *zap.Logger, waldir string, snapshot *raftpb.Sn
w.Close()
// we can only repair ErrUnexpectedEOF and we never repair twice.
if repaired || err != io.ErrUnexpectedEOF {
lg.Fatal("failed to read WAL, cannot be repaired", zap.Error(err))
cfg.Logger.Fatal("failed to read WAL, cannot be repaired", zap.Error(err))
}
if !wal.Repair(lg, waldir) {
lg.Fatal("failed to repair WAL", zap.Error(err))
if !wal.Repair(cfg.Logger, cfg.WALDir()) {
cfg.Logger.Fatal("failed to repair WAL", zap.Error(err))
} else {
lg.Info("repaired WAL", zap.Error(err))
cfg.Logger.Info("repaired WAL", zap.Error(err))
repaired = true
}
continue
@@ -518,23 +605,20 @@ func bootstrapWALFromSnapshot(lg *zap.Logger, waldir string, snapshot *raftpb.Sn
pbutil.MustUnmarshal(&metadata, wmetadata)
id := types.ID(metadata.NodeID)
cid := types.ID(metadata.ClusterID)
return &bootstrappedWAL{
lg: lg,
w: w,
id: id,
cid: cid,
st: &st,
ents: ents,
snapshot: snapshot,
}
meta := &snapshotMetadata{clusterID: cid, nodeID: id}
return w, &st, ents, snapshot, meta
}
}
func bootstrapNewWAL(cfg config.ServerConfig, nodeID, clusterID types.ID) *bootstrappedWAL {
type snapshotMetadata struct {
nodeID, clusterID types.ID
}
func bootstrapNewWAL(cfg config.ServerConfig, cl *bootstrapedCluster) *bootstrappedWAL {
metadata := pbutil.MustMarshal(
&etcdserverpb.Metadata{
NodeID: uint64(nodeID),
ClusterID: uint64(clusterID),
NodeID: uint64(cl.nodeID),
ClusterID: uint64(cl.cl.ID()),
},
)
w, err := wal.Create(cfg.Logger, cfg.WALDir(), metadata)
@@ -545,21 +629,20 @@ func bootstrapNewWAL(cfg config.ServerConfig, nodeID, clusterID types.ID) *boots
w.SetUnsafeNoFsync()
}
return &bootstrappedWAL{
lg: cfg.Logger,
w: w,
id: nodeID,
cid: clusterID,
lg: cfg.Logger,
w: w,
}
}
type bootstrappedWAL struct {
lg *zap.Logger
haveWAL bool
w *wal.WAL
id, cid types.ID
st *raftpb.HardState
ents []raftpb.Entry
snapshot *raftpb.Snapshot
meta *snapshotMetadata
}
func (wal *bootstrappedWAL) MemoryStorage() *raft.MemoryStorage {
@@ -591,11 +674,11 @@ func (wal *bootstrappedWAL) CommitedEntries() []raftpb.Entry {
return wal.ents
}
func (wal *bootstrappedWAL) ConfigChangeEntries() []raftpb.Entry {
func (wal *bootstrappedWAL) NewConfigChangeEntries() []raftpb.Entry {
return serverstorage.CreateConfigChangeEnts(
wal.lg,
serverstorage.GetIDs(wal.lg, wal.snapshot, wal.ents),
uint64(wal.id),
serverstorage.GetEffectiveNodeIDsFromWalEntries(wal.lg, wal.snapshot, wal.ents),
uint64(wal.meta.nodeID),
wal.st.Term,
wal.st.Commit,
)


@@ -67,7 +67,7 @@ func TestGetIDs(t *testing.T) {
if tt.confState != nil {
snap.Metadata.ConfState = *tt.confState
}
idSet := serverstorage.GetIDs(testLogger, &snap, tt.ents)
idSet := serverstorage.GetEffectiveNodeIDsFromWalEntries(testLogger, &snap, tt.ents)
if !reflect.DeepEqual(idSet, tt.widSet) {
t.Errorf("#%d: idset = %#v, want %#v", i, idSet, tt.widSet)
}


@@ -304,12 +304,12 @@ func NewServer(cfg config.ServerConfig) (srv *EtcdServer, err error) {
defer func() {
if err != nil {
b.be.Close()
b.Close()
}
}()
sstats := stats.NewServerStats(cfg.Name, b.raft.wal.id.String())
lstats := stats.NewLeaderStats(cfg.Logger, b.raft.wal.id.String())
sstats := stats.NewServerStats(cfg.Name, b.cluster.cl.String())
lstats := stats.NewLeaderStats(cfg.Logger, b.cluster.nodeID.String())
heartbeat := time.Duration(cfg.TickMs) * time.Millisecond
srv = &EtcdServer{
@@ -318,28 +318,28 @@ func NewServer(cfg config.ServerConfig) (srv *EtcdServer, err error) {
lgMu: new(sync.RWMutex),
lg: cfg.Logger,
errorc: make(chan error, 1),
v2store: b.st,
v2store: b.storage.st,
snapshotter: b.ss,
r: *b.raft.newRaftNode(b.ss),
id: b.raft.wal.id,
r: *b.raft.newRaftNode(b.ss, b.storage.wal.w, b.cluster.cl),
id: b.cluster.nodeID,
attributes: membership.Attributes{Name: cfg.Name, ClientURLs: cfg.ClientURLs.StringSlice()},
cluster: b.raft.cl,
cluster: b.cluster.cl,
stats: sstats,
lstats: lstats,
SyncTicker: time.NewTicker(500 * time.Millisecond),
peerRt: b.prt,
reqIDGen: idutil.NewGenerator(uint16(b.raft.wal.id), time.Now()),
reqIDGen: idutil.NewGenerator(uint16(b.cluster.nodeID), time.Now()),
AccessController: &AccessController{CORS: cfg.CORS, HostWhitelist: cfg.HostWhitelist},
consistIndex: b.ci,
consistIndex: b.storage.backend.ci,
firstCommitInTerm: notify.NewNotifier(),
clusterVersionChanged: notify.NewNotifier(),
}
serverID.With(prometheus.Labels{"server_id": b.raft.wal.id.String()}).Set(1)
serverID.With(prometheus.Labels{"server_id": b.cluster.nodeID.String()}).Set(1)
srv.cluster.SetVersionChangedNotifier(srv.clusterVersionChanged)
srv.applyV2 = NewApplierV2(cfg.Logger, srv.v2store, srv.cluster)
srv.be = b.be
srv.beHooks = b.beHooks
srv.be = b.storage.backend.be
srv.beHooks = b.storage.backend.beHooks
minTTL := time.Duration((3*cfg.ElectionTicks)/2) * heartbeat
// always recover lessor before kv. When we recover the mvcc.KV it will reattach keys to its leases.
@@ -403,9 +403,9 @@ func NewServer(cfg config.ServerConfig) (srv *EtcdServer, err error) {
Logger: cfg.Logger,
TLSInfo: cfg.PeerTLSInfo,
DialTimeout: cfg.PeerDialTimeout(),
ID: b.raft.wal.id,
ID: b.cluster.nodeID,
URLs: cfg.PeerURLs,
ClusterID: b.raft.cl.ID(),
ClusterID: b.cluster.cl.ID(),
Raft: srv,
Snapshotter: b.ss,
ServerStats: sstats,
@@ -416,13 +416,13 @@ func NewServer(cfg config.ServerConfig) (srv *EtcdServer, err error) {
return nil, err
}
// add all remotes into transport
for _, m := range b.remotes {
if m.ID != b.raft.wal.id {
for _, m := range b.cluster.remotes {
if m.ID != b.cluster.nodeID {
tr.AddRemote(m.ID, m.PeerURLs)
}
}
for _, m := range b.raft.cl.Members() {
if m.ID != b.raft.wal.id {
for _, m := range b.cluster.cl.Members() {
if m.ID != b.cluster.nodeID {
tr.AddPeer(m.ID, m.PeerURLs)
}
}


@@ -432,6 +432,8 @@ func (b *backend) Defrag() error {
func (b *backend) defrag() error {
now := time.Now()
isDefragActive.Set(1)
defer isDefragActive.Set(0)
// TODO: make this non-blocking?
// lock batchTx to ensure nobody is using previous tx, and then


@@ -83,6 +83,13 @@ var (
// highest bucket start of 0.01 sec * 2^16 == 655.36 sec
Buckets: prometheus.ExponentialBuckets(.01, 2, 17),
})
isDefragActive = prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: "etcd",
Subsystem: "disk",
Name: "defrag_inflight",
Help: "Whether or not defrag is active on the member. 1 means active, 0 means not.",
})
)
func init() {
@@ -92,4 +99,5 @@ func init() {
prometheus.MustRegister(writeSec)
prometheus.MustRegister(defragSec)
prometheus.MustRegister(snapshotTransferSec)
prometheus.MustRegister(isDefragActive)
}
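
A hedged sketch of asserting the new gauge from a test, using client_golang's testutil package (hypothetical; not part of this diff):

```go
import "github.com/prometheus/client_golang/prometheus/testutil"

// Outside defrag() the gauge should read 0; during a defrag, 1
// (see the Set(1)/defer Set(0) pair in backend.go above).
func defragInactive() bool {
	return testutil.ToFloat64(isDefragActive) == 0
}
```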


@@ -358,7 +358,7 @@ func (s *watchableStore) syncWatchers() int {
tx.RUnlock()
evs := kvsToEvents(s.store.lg, wg, revs, vs)
var victims watcherBatch
victims := make(watcherBatch)
wb := newWatcherBatch(wg, evs)
for w := range wg.watchers {
w.minRev = curRev + 1
@@ -378,9 +378,6 @@ func (s *watchableStore) syncWatchers() int {
if w.send(WatchResponse{WatchID: w.id, Events: eb.evs, Revision: curRev}) {
pendingEventsGauge.Add(float64(len(eb.evs)))
} else {
if victims == nil {
victims = make(watcherBatch)
}
w.victim = true
}
@@ -432,7 +429,7 @@ func kvsToEvents(lg *zap.Logger, wg *watcherGroup, revs, vals [][]byte) (evs []m
// notify notifies the fact that given event at the given rev just happened to
// watchers that watch on the key of the event.
func (s *watchableStore) notify(rev int64, evs []mvccpb.Event) {
var victim watcherBatch
victim := make(watcherBatch)
for w, eb := range newWatcherBatch(&s.synced, evs) {
if eb.revs != 1 {
s.store.lg.Panic(
@@ -445,9 +442,6 @@ func (s *watchableStore) notify(rev int64, evs []mvccpb.Event) {
} else {
// move slow watcher to victims
w.minRev = rev + 1
if victim == nil {
victim = make(watcherBatch)
}
w.victim = true
victim[w] = eb
s.synced.delete(w)
@@ -458,7 +452,7 @@ func (s *watchableStore) notify(rev int64, evs []mvccpb.Event) {
}
func (s *watchableStore) addVictim(victim watcherBatch) {
if victim == nil {
if len(victim) == 0 {
return
}
s.victims = append(s.victims, victim)
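
Replacing the nil checks with len(victim) == 0 works because len of a nil map is zero in Go, so an always-allocated batch behaves identically when empty; a standalone illustration:

```go
// len is 0 for both a nil and an allocated-but-empty watcherBatch,
// so addVictim's len(victim) == 0 guard matches the old nil check.
var nilBatch watcherBatch
emptyBatch := make(watcherBatch)
_ = len(nilBatch) == 0 && len(emptyBatch) == 0 // always true
```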


@@ -109,13 +109,13 @@ func CreateConfigChangeEnts(lg *zap.Logger, ids []uint64, self uint64, term, ind
return ents
}
// GetIDs returns an ordered set of IDs included in the given snapshot and
// GetEffectiveNodeIDsFromWalEntries returns an ordered set of IDs included in the given snapshot and
// the entries. The given snapshot/entries can contain three kinds of
// ID-related entry:
// - ConfChangeAddNode, in which case the contained ID will be added into the set.
// - ConfChangeRemoveNode, in which case the contained ID will be removed from the set.
// - ConfChangeAddLearnerNode, in which the contained ID will be added into the set.
func GetIDs(lg *zap.Logger, snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 {
func GetEffectiveNodeIDsFromWalEntries(lg *zap.Logger, snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 {
ids := make(map[uint64]bool)
if snap != nil {
for _, id := range snap.Metadata.ConfState.Voters {


@@ -116,7 +116,7 @@ func Create(lg *zap.Logger, dirpath string, metadata []byte) (*WAL, error) {
}
defer os.RemoveAll(tmpdirpath)
if err := fileutil.CreateDirAll(tmpdirpath); err != nil {
if err := fileutil.CreateDirAll(lg, tmpdirpath); err != nil {
lg.Warn(
"failed to create a temporary WAL directory",
zap.String("tmp-dir-path", tmpdirpath),


@@ -115,6 +115,10 @@ func (p *proxyEtcdProcess) WithStopSignal(sig os.Signal) os.Signal {
return p.etcdProc.WithStopSignal(sig)
}
func (p *proxyEtcdProcess) Logs() logsExpect {
return p.etcdProc.Logs()
}
type proxyProc struct {
lg *zap.Logger
execPath string
@@ -132,7 +136,7 @@ func (pp *proxyProc) start() error {
if pp.proc != nil {
panic("already started")
}
proc, err := spawnCmdWithLogger(pp.lg, append([]string{pp.execPath}, pp.args...))
proc, err := spawnCmdWithLogger(pp.lg, append([]string{pp.execPath}, pp.args...), nil)
if err != nil {
return err
}


@@ -144,6 +144,7 @@ type etcdProcessClusterConfig struct {
execPath string
dataDirPath string
keepDataDir bool
envVars map[string]string
clusterSize int
@@ -318,6 +319,7 @@ func (cfg *etcdProcessClusterConfig) etcdServerProcessConfigs(tb testing.TB) []*
lg: lg,
execPath: cfg.execPath,
args: args,
envVars: cfg.envVars,
tlsArgs: cfg.tlsArgs(),
dataDirPath: dataDirPath,
keepDataDir: cfg.keepDataDir,


@@ -505,7 +505,7 @@ func etcdctlBackup(t testing.TB, clus *etcdProcessCluster, dataDir, backupDir st
cmdArgs = append(cmdArgs, "--with-v3=false")
}
t.Logf("Running: %v", cmdArgs)
proc, err := spawnCmd(cmdArgs)
proc, err := spawnCmd(cmdArgs, nil)
if err != nil {
return err
}


@@ -101,5 +101,5 @@ func alarmTest(cx ctlCtx) {
func ctlV3Alarm(cx ctlCtx, cmd string, as ...string) error {
cmdArgs := append(cx.PrefixArgs(), "alarm", cmd)
return spawnWithExpects(cmdArgs, as...)
return spawnWithExpects(cmdArgs, cx.envMap, as...)
}


@@ -93,7 +93,7 @@ func authEnable(cx ctlCtx) error {
func ctlV3AuthEnable(cx ctlCtx) error {
cmdArgs := append(cx.PrefixArgs(), "auth", "enable")
return spawnWithExpect(cmdArgs, "Authentication Enabled")
return spawnWithExpectWithEnv(cmdArgs, cx.envMap, "Authentication Enabled")
}
func authDisableTest(cx ctlCtx) {
@@ -139,12 +139,12 @@ func authDisableTest(cx ctlCtx) {
func ctlV3AuthDisable(cx ctlCtx) error {
cmdArgs := append(cx.PrefixArgs(), "auth", "disable")
return spawnWithExpect(cmdArgs, "Authentication Disabled")
return spawnWithExpectWithEnv(cmdArgs, cx.envMap, "Authentication Disabled")
}
func authStatusTest(cx ctlCtx) {
cmdArgs := append(cx.PrefixArgs(), "auth", "status")
if err := spawnWithExpects(cmdArgs, "Authentication Status: false", "AuthRevision:"); err != nil {
if err := spawnWithExpects(cmdArgs, cx.envMap, "Authentication Status: false", "AuthRevision:"); err != nil {
cx.t.Fatal(err)
}
@@ -155,15 +155,15 @@ func authStatusTest(cx ctlCtx) {
cx.user, cx.pass = "root", "root"
cmdArgs = append(cx.PrefixArgs(), "auth", "status")
if err := spawnWithExpects(cmdArgs, "Authentication Status: true", "AuthRevision:"); err != nil {
if err := spawnWithExpects(cmdArgs, cx.envMap, "Authentication Status: true", "AuthRevision:"); err != nil {
cx.t.Fatal(err)
}
cmdArgs = append(cx.PrefixArgs(), "auth", "status", "--write-out", "json")
if err := spawnWithExpect(cmdArgs, "enabled"); err != nil {
if err := spawnWithExpectWithEnv(cmdArgs, cx.envMap, "enabled"); err != nil {
cx.t.Fatal(err)
}
if err := spawnWithExpect(cmdArgs, "authRevision"); err != nil {
if err := spawnWithExpectWithEnv(cmdArgs, cx.envMap, "authRevision"); err != nil {
cx.t.Fatal(err)
}
}
@@ -381,25 +381,25 @@ func authRoleRevokeDuringOpsTest(cx ctlCtx) {
}
func ctlV3PutFailAuth(cx ctlCtx, key, val string) error {
return spawnWithExpect(append(cx.PrefixArgs(), "put", key, val), "authentication failed")
return spawnWithExpectWithEnv(append(cx.PrefixArgs(), "put", key, val), cx.envMap, "authentication failed")
}
func ctlV3PutFailPerm(cx ctlCtx, key, val string) error {
return spawnWithExpect(append(cx.PrefixArgs(), "put", key, val), "permission denied")
return spawnWithExpectWithEnv(append(cx.PrefixArgs(), "put", key, val), cx.envMap, "permission denied")
}
func authSetupTestUser(cx ctlCtx) {
if err := ctlV3User(cx, []string{"add", "test-user", "--interactive=false"}, "User test-user created", []string{"pass"}); err != nil {
cx.t.Fatal(err)
}
if err := spawnWithExpect(append(cx.PrefixArgs(), "role", "add", "test-role"), "Role test-role created"); err != nil {
if err := spawnWithExpectWithEnv(append(cx.PrefixArgs(), "role", "add", "test-role"), cx.envMap, "Role test-role created"); err != nil {
cx.t.Fatal(err)
}
if err := ctlV3User(cx, []string{"grant-role", "test-user", "test-role"}, "Role test-role is granted to user test-user", nil); err != nil {
cx.t.Fatal(err)
}
cmd := append(cx.PrefixArgs(), "role", "grant-permission", "test-role", "readwrite", "foo")
if err := spawnWithExpect(cmd, "Role test-role updated"); err != nil {
if err := spawnWithExpectWithEnv(cmd, cx.envMap, "Role test-role updated"); err != nil {
cx.t.Fatal(err)
}
}
@ -611,7 +611,7 @@ func authTestCertCN(cx ctlCtx) {
if err := ctlV3User(cx, []string{"add", "example.com", "--interactive=false"}, "User example.com created", []string{""}); err != nil {
cx.t.Fatal(err)
}
if err := spawnWithExpect(append(cx.PrefixArgs(), "role", "add", "test-role"), "Role test-role created"); err != nil {
if err := spawnWithExpectWithEnv(append(cx.PrefixArgs(), "role", "add", "test-role"), cx.envMap, "Role test-role created"); err != nil {
cx.t.Fatal(err)
}
if err := ctlV3User(cx, []string{"grant-role", "example.com", "test-role"}, "Role test-role is granted to user example.com", nil); err != nil {
@ -921,13 +921,13 @@ func authTestRoleGet(cx ctlCtx) {
"KV Read:", "foo",
"KV Write:", "foo",
}
if err := spawnWithExpects(append(cx.PrefixArgs(), "role", "get", "test-role"), expected...); err != nil {
if err := spawnWithExpects(append(cx.PrefixArgs(), "role", "get", "test-role"), cx.envMap, expected...); err != nil {
cx.t.Fatal(err)
}
// test-user can get the information of test-role because it belongs to the role
cx.user, cx.pass = "test-user", "pass"
if err := spawnWithExpects(append(cx.PrefixArgs(), "role", "get", "test-role"), expected...); err != nil {
if err := spawnWithExpects(append(cx.PrefixArgs(), "role", "get", "test-role"), cx.envMap, expected...); err != nil {
cx.t.Fatal(err)
}
@ -935,7 +935,7 @@ func authTestRoleGet(cx ctlCtx) {
expected = []string{
"Error: etcdserver: permission denied",
}
if err := spawnWithExpects(append(cx.PrefixArgs(), "role", "get", "root"), expected...); err != nil {
if err := spawnWithExpects(append(cx.PrefixArgs(), "role", "get", "root"), cx.envMap, expected...); err != nil {
cx.t.Fatal(err)
}
}
@ -952,13 +952,13 @@ func authTestUserGet(cx ctlCtx) {
"Roles: test-role",
}
if err := spawnWithExpects(append(cx.PrefixArgs(), "user", "get", "test-user"), expected...); err != nil {
if err := spawnWithExpects(append(cx.PrefixArgs(), "user", "get", "test-user"), cx.envMap, expected...); err != nil {
cx.t.Fatal(err)
}
// test-user can get the information of test-user itself
cx.user, cx.pass = "test-user", "pass"
if err := spawnWithExpects(append(cx.PrefixArgs(), "user", "get", "test-user"), expected...); err != nil {
if err := spawnWithExpects(append(cx.PrefixArgs(), "user", "get", "test-user"), cx.envMap, expected...); err != nil {
cx.t.Fatal(err)
}
@ -966,7 +966,7 @@ func authTestUserGet(cx ctlCtx) {
expected = []string{
"Error: etcdserver: permission denied",
}
if err := spawnWithExpects(append(cx.PrefixArgs(), "user", "get", "root"), expected...); err != nil {
if err := spawnWithExpects(append(cx.PrefixArgs(), "user", "get", "root"), cx.envMap, expected...); err != nil {
cx.t.Fatal(err)
}
}
@ -977,7 +977,7 @@ func authTestRoleList(cx ctlCtx) {
}
cx.user, cx.pass = "root", "root"
authSetupTestUser(cx)
if err := spawnWithExpect(append(cx.PrefixArgs(), "role", "list"), "test-role"); err != nil {
if err := spawnWithExpectWithEnv(append(cx.PrefixArgs(), "role", "list"), cx.envMap, "test-role"); err != nil {
cx.t.Fatal(err)
}
}
@ -1088,7 +1088,7 @@ func certCNAndUsername(cx ctlCtx, noPassword bool) {
cx.t.Fatal(err)
}
}
if err := spawnWithExpect(append(cx.PrefixArgs(), "role", "add", "test-role-cn"), "Role test-role-cn created"); err != nil {
if err := spawnWithExpectWithEnv(append(cx.PrefixArgs(), "role", "add", "test-role-cn"), cx.envMap, "Role test-role-cn created"); err != nil {
cx.t.Fatal(err)
}
if err := ctlV3User(cx, []string{"grant-role", "example.com", "test-role-cn"}, "Role test-role-cn is granted to user example.com", nil); err != nil {

View File

@ -71,5 +71,5 @@ func ctlV3Compact(cx ctlCtx, rev int64, physical bool) error {
if physical {
cmdArgs = append(cmdArgs, "--physical")
}
return spawnWithExpect(cmdArgs, "compacted revision "+rs)
return spawnWithExpectWithEnv(cmdArgs, cx.envMap, "compacted revision "+rs)
}

View File

@ -52,13 +52,13 @@ func ctlV3OnlineDefrag(cx ctlCtx) error {
for i := range lines {
lines[i] = "Finished defragmenting etcd member"
}
return spawnWithExpects(cmdArgs, lines...)
return spawnWithExpects(cmdArgs, cx.envMap, lines...)
}
func ctlV3OfflineDefrag(cx ctlCtx) error {
cmdArgs := append(cx.PrefixArgsUtl(), "defrag", "--data-dir", cx.dataDir)
lines := []string{"finished defragmenting directory"}
return spawnWithExpects(cmdArgs, lines...)
return spawnWithExpects(cmdArgs, cx.envMap, lines...)
}
func defragOfflineTest(cx ctlCtx) {

View File

@ -98,7 +98,7 @@ func testElect(cx ctlCtx) {
// ctlV3Elect creates an elect process with a channel listening for when it wins the election.
func ctlV3Elect(cx ctlCtx, name, proposal string) (*expect.ExpectProcess, <-chan string, error) {
cmdArgs := append(cx.PrefixArgs(), "elect", name, proposal)
proc, err := spawnCmd(cmdArgs)
proc, err := spawnCmd(cmdArgs, cx.envMap)
outc := make(chan string, 1)
if err != nil {
close(outc)

View File

@ -40,7 +40,7 @@ func ctlV3EndpointHealth(cx ctlCtx) error {
for i := range lines {
lines[i] = "is healthy"
}
return spawnWithExpects(cmdArgs, lines...)
return spawnWithExpects(cmdArgs, cx.envMap, lines...)
}
func endpointStatusTest(cx ctlCtx) {
@ -56,7 +56,7 @@ func ctlV3EndpointStatus(cx ctlCtx) error {
u, _ := url.Parse(ep)
eps = append(eps, u.Host)
}
return spawnWithExpects(cmdArgs, eps...)
return spawnWithExpects(cmdArgs, cx.envMap, eps...)
}
func endpointHashKVTest(cx ctlCtx) {
@ -88,5 +88,5 @@ func ctlV3EndpointHashKV(cx ctlCtx) error {
u, _ := url.Parse(ep)
ss = append(ss, fmt.Sprintf("%s, %d", u.Host, hresp.Hash))
}
return spawnWithExpects(cmdArgs, ss...)
return spawnWithExpects(cmdArgs, cx.envMap, ss...)
}

View File

@ -0,0 +1,213 @@
// Copyright 2021 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !cluster_proxy
// +build !cluster_proxy
package e2e
import (
"fmt"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
"go.etcd.io/etcd/client/pkg/v3/testutil"
)
func TestAuthority(t *testing.T) {
tcs := []struct {
name string
useTLS bool
useInsecureTLS bool
// Pattern used to generate endpoints for the client. Fields filled:
// %d - will be filled with the member's gRPC port
clientURLPattern string
// Pattern used to validate the authority received by the server. Fields filled:
// %d - will be filled with the first member's gRPC port
expectAuthorityPattern string
}{
{
name: "http://domain[:port]",
clientURLPattern: "http://localhost:%d",
expectAuthorityPattern: "localhost:%d",
},
{
name: "http://address[:port]",
clientURLPattern: "http://127.0.0.1:%d",
expectAuthorityPattern: "127.0.0.1:%d",
},
{
name: "https://domain[:port] insecure",
useTLS: true,
useInsecureTLS: true,
clientURLPattern: "https://localhost:%d",
expectAuthorityPattern: "localhost:%d",
},
{
name: "https://address[:port] insecure",
useTLS: true,
useInsecureTLS: true,
clientURLPattern: "https://127.0.0.1:%d",
expectAuthorityPattern: "127.0.0.1:%d",
},
{
name: "https://domain[:port]",
useTLS: true,
clientURLPattern: "https://localhost:%d",
expectAuthorityPattern: "localhost:%d",
},
{
name: "https://address[:port]",
useTLS: true,
clientURLPattern: "https://127.0.0.1:%d",
expectAuthorityPattern: "127.0.0.1:%d",
},
}
for _, tc := range tcs {
for _, clusterSize := range []int{1, 3} {
t.Run(fmt.Sprintf("Size: %d, Scenario: %q", clusterSize, tc.name), func(t *testing.T) {
BeforeTest(t)
cfg := newConfigNoTLS()
cfg.clusterSize = clusterSize
if tc.useTLS {
cfg.clientTLS = clientTLS
}
cfg.isClientAutoTLS = tc.useInsecureTLS
// Enable debug mode to get logs with http2 headers (including authority)
cfg.envVars = map[string]string{"GODEBUG": "http2debug=2"}
epc, err := newEtcdProcessCluster(t, cfg)
if err != nil {
t.Fatalf("could not start etcd process cluster (%v)", err)
}
defer epc.Close()
endpoints := templateEndpoints(t, tc.clientURLPattern, epc)
client := clusterEtcdctlV3(cfg, endpoints)
err = client.Put("foo", "bar")
if err != nil {
t.Fatal(err)
}
executeWithTimeout(t, 5*time.Second, func() {
assertAuthority(t, fmt.Sprintf(tc.expectAuthorityPattern, 20000), epc)
})
})
}
}
}
func templateEndpoints(t *testing.T, pattern string, clus *etcdProcessCluster) []string {
t.Helper()
endpoints := []string{}
for i := 0; i < clus.cfg.clusterSize; i++ {
ent := pattern
if strings.Contains(ent, "%d") {
ent = fmt.Sprintf(ent, etcdProcessBasePort+i*5)
}
if strings.Contains(ent, "%") {
t.Fatalf("Failed to template pattern, %% symbol left %q", ent)
}
endpoints = append(endpoints, ent)
}
return endpoints
}
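For example, with etcdProcessBasePort assumed to be 20000 (the first member's port used in the assertAuthority call above), the i*5 stride templates a three-member cluster to ports 20000, 20005 and 20010, one five-port block per member.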
func assertAuthority(t *testing.T, expectAuthority string, clus *etcdProcessCluster) {
logs := []logsExpect{}
for _, proc := range clus.procs {
logs = append(logs, proc.Logs())
}
line := firstMatch(t, `http2: decoded hpack field header field ":authority"`, logs...)
line = strings.TrimSuffix(line, "\n")
line = strings.TrimSuffix(line, "\r")
expectLine := fmt.Sprintf(`http2: decoded hpack field header field ":authority" = %q`, expectAuthority)
assert.True(t, strings.HasSuffix(line, expectLine), fmt.Sprintf("Got %q expected suffix %q", line, expectLine))
}
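// firstMatch fans out one goroutine per log and returns the first line that
// matches expectLine; the match channel is buffered to len(logs) so the
// slower goroutines can still send without blocking after a winner is found.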
func firstMatch(t *testing.T, expectLine string, logs ...logsExpect) string {
t.Helper()
match := make(chan string, len(logs))
for i := range logs {
go func(l logsExpect) {
line, _ := l.Expect(expectLine)
match <- line
}(logs[i])
}
return <-match
}
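// executeWithTimeout runs f and, if it does not finish within timeout, fails
// the test with a full goroutine stack dump via testutil.FatalStack.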
func executeWithTimeout(t *testing.T, timeout time.Duration, f func()) {
donec := make(chan struct{})
go func() {
defer close(donec)
f()
}()
select {
case <-time.After(timeout):
testutil.FatalStack(t, fmt.Sprintf("test timed out after %v", timeout))
case <-donec:
}
}
type etcdctlV3 struct {
cfg *etcdProcessClusterConfig
endpoints []string
}
func clusterEtcdctlV3(cfg *etcdProcessClusterConfig, endpoints []string) *etcdctlV3 {
return &etcdctlV3{
cfg: cfg,
endpoints: endpoints,
}
}
func (ctl *etcdctlV3) Put(key, value string) error {
return ctl.runCmd("put", key, value)
}
func (ctl *etcdctlV3) runCmd(args ...string) error {
cmdArgs := []string{ctlBinPath + "3"}
for k, v := range ctl.flags() {
cmdArgs = append(cmdArgs, fmt.Sprintf("--%s=%s", k, v))
}
cmdArgs = append(cmdArgs, args...)
return spawnWithExpect(cmdArgs, "OK")
}
func (ctl *etcdctlV3) flags() map[string]string {
fmap := make(map[string]string)
if ctl.cfg.clientTLS == clientTLS {
if ctl.cfg.isClientAutoTLS {
fmap["insecure-transport"] = "false"
fmap["insecure-skip-tls-verify"] = "true"
} else if ctl.cfg.isClientCRL {
fmap["cacert"] = caPath
fmap["cert"] = revokedCertPath
fmap["key"] = revokedPrivateKeyPath
} else {
fmap["cacert"] = caPath
fmap["cert"] = certPath
fmap["key"] = privateKeyPath
}
}
fmap["endpoints"] = strings.Join(ctl.endpoints, ",")
return fmap
}

View File

@ -190,7 +190,7 @@ func getFormatTest(cx ctlCtx) {
cmdArgs = append(cmdArgs, "--print-value-only")
}
cmdArgs = append(cmdArgs, "abc")
if err := spawnWithExpect(cmdArgs, tt.wstr); err != nil {
if err := spawnWithExpectWithEnv(cmdArgs, cx.envMap, tt.wstr); err != nil {
cx.t.Errorf("#%d: error (%v), wanted %v", i, err, tt.wstr)
}
}
@ -228,24 +228,24 @@ func getKeysOnlyTest(cx ctlCtx) {
cx.t.Fatal(err)
}
cmdArgs := append(cx.PrefixArgs(), []string{"get", "--keys-only", "key"}...)
if err := spawnWithExpect(cmdArgs, "key"); err != nil {
if err := spawnWithExpectWithEnv(cmdArgs, cx.envMap, "key"); err != nil {
cx.t.Fatal(err)
}
if err := spawnWithExpects(cmdArgs, "val"); err == nil {
if err := spawnWithExpects(cmdArgs, cx.envMap, "val"); err == nil {
cx.t.Fatalf("got value but passed --keys-only")
}
}
func getCountOnlyTest(cx ctlCtx) {
cmdArgs := append(cx.PrefixArgs(), []string{"get", "--count-only", "key", "--prefix", "--write-out=fields"}...)
if err := spawnWithExpects(cmdArgs, "\"Count\" : 0"); err != nil {
if err := spawnWithExpects(cmdArgs, cx.envMap, "\"Count\" : 0"); err != nil {
cx.t.Fatal(err)
}
if err := ctlV3Put(cx, "key", "val", ""); err != nil {
cx.t.Fatal(err)
}
cmdArgs = append(cx.PrefixArgs(), []string{"get", "--count-only", "key", "--prefix", "--write-out=fields"}...)
if err := spawnWithExpects(cmdArgs, "\"Count\" : 1"); err != nil {
if err := spawnWithExpects(cmdArgs, cx.envMap, "\"Count\" : 1"); err != nil {
cx.t.Fatal(err)
}
if err := ctlV3Put(cx, "key1", "val", ""); err != nil {
@ -255,21 +255,21 @@ func getCountOnlyTest(cx ctlCtx) {
cx.t.Fatal(err)
}
cmdArgs = append(cx.PrefixArgs(), []string{"get", "--count-only", "key", "--prefix", "--write-out=fields"}...)
if err := spawnWithExpects(cmdArgs, "\"Count\" : 2"); err != nil {
if err := spawnWithExpects(cmdArgs, cx.envMap, "\"Count\" : 2"); err != nil {
cx.t.Fatal(err)
}
if err := ctlV3Put(cx, "key2", "val", ""); err != nil {
cx.t.Fatal(err)
}
cmdArgs = append(cx.PrefixArgs(), []string{"get", "--count-only", "key", "--prefix", "--write-out=fields"}...)
if err := spawnWithExpects(cmdArgs, "\"Count\" : 3"); err != nil {
if err := spawnWithExpects(cmdArgs, cx.envMap, "\"Count\" : 3"); err != nil {
cx.t.Fatal(err)
}
expected := []string{
"\"Count\" : 3",
}
cmdArgs = append(cx.PrefixArgs(), []string{"get", "--count-only", "key3", "--prefix", "--write-out=fields"}...)
if err := spawnWithExpects(cmdArgs, expected...); err == nil {
if err := spawnWithExpects(cmdArgs, cx.envMap, expected...); err == nil {
cx.t.Fatal("expected no match for count under the key3 prefix")
}
}
@ -348,7 +348,7 @@ func ctlV3Put(cx ctlCtx, key, value, leaseID string, flags ...string) error {
if len(flags) != 0 {
cmdArgs = append(cmdArgs, flags...)
}
return spawnWithExpect(cmdArgs, "OK")
return spawnWithExpectWithEnv(cmdArgs, cx.envMap, "OK")
}
type kv struct {
@ -365,7 +365,7 @@ func ctlV3Get(cx ctlCtx, args []string, kvs ...kv) error {
for _, elem := range kvs {
lines = append(lines, elem.key, elem.val)
}
return spawnWithExpects(cmdArgs, lines...)
return spawnWithExpects(cmdArgs, cx.envMap, lines...)
}
// ctlV3GetWithErr runs "get" command expecting no output but error
@ -375,11 +375,11 @@ func ctlV3GetWithErr(cx ctlCtx, args []string, errs []string) error {
if !cx.quorum {
cmdArgs = append(cmdArgs, "--consistency", "s")
}
return spawnWithExpects(cmdArgs, errs...)
return spawnWithExpects(cmdArgs, cx.envMap, errs...)
}
func ctlV3Del(cx ctlCtx, args []string, num int) error {
cmdArgs := append(cx.PrefixArgs(), "del")
cmdArgs = append(cmdArgs, args...)
return spawnWithExpects(cmdArgs, fmt.Sprintf("%d", num))
return spawnWithExpects(cmdArgs, cx.envMap, fmt.Sprintf("%d", num))
}

View File

@ -113,7 +113,7 @@ func leaseTestGrantTimeToLive(cx ctlCtx) {
}
cmdArgs := append(cx.PrefixArgs(), "lease", "timetolive", id, "--keys")
proc, err := spawnCmd(cmdArgs)
proc, err := spawnCmd(cmdArgs, cx.envMap)
if err != nil {
cx.t.Fatalf("leaseTestGrantTimeToLive: error (%v)", err)
}
@ -146,7 +146,7 @@ func leaseTestGrantLeasesList(cx ctlCtx) error {
}
cmdArgs := append(cx.PrefixArgs(), "lease", "list")
proc, err := spawnCmd(cmdArgs)
proc, err := spawnCmd(cmdArgs, cx.envMap)
if err != nil {
return fmt.Errorf("lease list failed (%v)", err)
}
@ -177,7 +177,7 @@ func leaseTestTimeToLiveExpire(cx ctlCtx, ttl int) error {
time.Sleep(time.Duration(ttl+1) * time.Second)
cmdArgs := append(cx.PrefixArgs(), "lease", "timetolive", leaseID)
exp := fmt.Sprintf("lease %s already expired", leaseID)
if err = spawnWithExpect(cmdArgs, exp); err != nil {
if err = spawnWithExpectWithEnv(cmdArgs, cx.envMap, exp); err != nil {
return fmt.Errorf("lease not properly expired: (%v)", err)
}
if err := ctlV3Get(cx, []string{"key"}); err != nil {
@ -247,7 +247,7 @@ func leaseTestRevoke(cx ctlCtx) error {
func ctlV3LeaseGrant(cx ctlCtx, ttl int) (string, error) {
cmdArgs := append(cx.PrefixArgs(), "lease", "grant", strconv.Itoa(ttl))
proc, err := spawnCmd(cmdArgs)
proc, err := spawnCmd(cmdArgs, cx.envMap)
if err != nil {
return "", err
}
@ -271,7 +271,7 @@ func ctlV3LeaseGrant(cx ctlCtx, ttl int) (string, error) {
func ctlV3LeaseKeepAlive(cx ctlCtx, leaseID string) error {
cmdArgs := append(cx.PrefixArgs(), "lease", "keep-alive", leaseID)
proc, err := spawnCmd(cmdArgs)
proc, err := spawnCmd(cmdArgs, nil)
if err != nil {
return err
}
@ -285,7 +285,7 @@ func ctlV3LeaseKeepAlive(cx ctlCtx, leaseID string) error {
func ctlV3LeaseKeepAliveOnce(cx ctlCtx, leaseID string) error {
cmdArgs := append(cx.PrefixArgs(), "lease", "keep-alive", "--once", leaseID)
proc, err := spawnCmd(cmdArgs)
proc, err := spawnCmd(cmdArgs, nil)
if err != nil {
return err
}
@ -298,5 +298,5 @@ func ctlV3LeaseKeepAliveOnce(cx ctlCtx, leaseID string) error {
func ctlV3LeaseRevoke(cx ctlCtx, leaseID string) error {
cmdArgs := append(cx.PrefixArgs(), "lease", "revoke", leaseID)
return spawnWithExpect(cmdArgs, fmt.Sprintf("lease %s revoked", leaseID))
return spawnWithExpectWithEnv(cmdArgs, cx.envMap, fmt.Sprintf("lease %s revoked", leaseID))
}

View File

@ -119,7 +119,7 @@ func testLockWithCmd(cx ctlCtx) {
// ctlV3Lock creates a lock process with a channel listening for when it acquires the lock.
func ctlV3Lock(cx ctlCtx, name string) (*expect.ExpectProcess, <-chan string, error) {
cmdArgs := append(cx.PrefixArgs(), "lock", name)
proc, err := spawnCmd(cmdArgs)
proc, err := spawnCmd(cmdArgs, cx.envMap)
outc := make(chan string, 1)
if err != nil {
close(outc)
@ -140,5 +140,5 @@ func ctlV3LockWithCmd(cx ctlCtx, execCmd []string, as ...string) error {
// use command as lock name
cmdArgs := append(cx.PrefixArgs(), "lock", execCmd[0])
cmdArgs = append(cmdArgs, execCmd...)
return spawnWithExpects(cmdArgs, as...)
return spawnWithExpects(cmdArgs, cx.envMap, as...)
}

View File

@ -83,7 +83,7 @@ func testMirrorCommand(cx ctlCtx, flags []string, sourcekvs []kv, destkvs []kvEx
cmdArgs := append(cx.PrefixArgs(), "make-mirror")
cmdArgs = append(cmdArgs, flags...)
cmdArgs = append(cmdArgs, fmt.Sprintf("localhost:%d", mirrorcfg.basePort))
proc, err := spawnCmd(cmdArgs)
proc, err := spawnCmd(cmdArgs, cx.envMap)
if err != nil {
cx.t.Fatal(err)
}

View File

@ -95,13 +95,13 @@ func ctlV3MemberList(cx ctlCtx) error {
for i := range lines {
lines[i] = "started"
}
return spawnWithExpects(cmdArgs, lines...)
return spawnWithExpects(cmdArgs, cx.envMap, lines...)
}
func getMemberList(cx ctlCtx) (etcdserverpb.MemberListResponse, error) {
cmdArgs := append(cx.PrefixArgs(), "--write-out", "json", "member", "list")
proc, err := spawnCmd(cmdArgs)
proc, err := spawnCmd(cmdArgs, cx.envMap)
if err != nil {
return etcdserverpb.MemberListResponse{}, err
}
@ -130,7 +130,7 @@ func memberListWithHexTest(cx ctlCtx) {
cmdArgs := append(cx.PrefixArgs(), "--write-out", "json", "--hex", "member", "list")
proc, err := spawnCmd(cmdArgs)
proc, err := spawnCmd(cmdArgs, cx.envMap)
if err != nil {
cx.t.Fatalf("memberListWithHexTest error (%v)", err)
}
@ -177,7 +177,7 @@ func memberRemoveTest(cx ctlCtx) {
func ctlV3MemberRemove(cx ctlCtx, ep, memberID, clusterID string) error {
cmdArgs := append(cx.prefixArgs([]string{ep}), "member", "remove", memberID)
return spawnWithExpect(cmdArgs, fmt.Sprintf("%s removed from cluster %s", memberID, clusterID))
return spawnWithExpectWithEnv(cmdArgs, cx.envMap, fmt.Sprintf("%s removed from cluster %s", memberID, clusterID))
}
func memberAddTest(cx ctlCtx) {
@ -197,7 +197,7 @@ func ctlV3MemberAdd(cx ctlCtx, peerURL string, isLearner bool) error {
if isLearner {
cmdArgs = append(cmdArgs, "--learner")
}
return spawnWithExpect(cmdArgs, " added to cluster ")
return spawnWithExpectWithEnv(cmdArgs, cx.envMap, " added to cluster ")
}
func memberUpdateTest(cx ctlCtx) {
@ -215,5 +215,5 @@ func memberUpdateTest(cx ctlCtx) {
func ctlV3MemberUpdate(cx ctlCtx, memberID, peerURL string) error {
cmdArgs := append(cx.PrefixArgs(), "member", "update", memberID, fmt.Sprintf("--peer-urls=%s", peerURL))
return spawnWithExpect(cmdArgs, " updated in cluster ")
return spawnWithExpectWithEnv(cmdArgs, cx.envMap, " updated in cluster ")
}

View File

@ -97,21 +97,22 @@ func testCtlV3MoveLeader(t *testing.T, cfg etcdProcessClusterConfig) {
}
tests := []struct {
prefixes []string
expect string
eps []string
expect string
}{
{ // request to non-leader
cx.prefixArgs([]string{cx.epc.EndpointsV3()[(leadIdx+1)%3]}),
[]string{cx.epc.EndpointsV3()[(leadIdx+1)%3]},
"no leader endpoint given at ",
},
{ // request to leader
cx.prefixArgs([]string{cx.epc.EndpointsV3()[leadIdx]}),
[]string{cx.epc.EndpointsV3()[leadIdx]},
fmt.Sprintf("Leadership transferred from %s to %s", types.ID(leaderID), types.ID(transferee)),
},
}
for i, tc := range tests {
cmdArgs := append(tc.prefixes, "move-leader", types.ID(transferee).String())
if err := spawnWithExpect(cmdArgs, tc.expect); err != nil {
prefix := cx.prefixArgs(tc.eps)
cmdArgs := append(prefix, "move-leader", types.ID(transferee).String())
if err := spawnWithExpectWithEnv(cmdArgs, cx.envMap, tc.expect); err != nil {
t.Fatalf("#%d: %v", i, err)
}
}

View File

@ -140,13 +140,13 @@ func ctlV3RoleMultiExpect(cx ctlCtx, args []string, expStr ...string) error {
cmdArgs := append(cx.PrefixArgs(), "role")
cmdArgs = append(cmdArgs, args...)
return spawnWithExpects(cmdArgs, expStr...)
return spawnWithExpects(cmdArgs, cx.envMap, expStr...)
}
func ctlV3Role(cx ctlCtx, args []string, expStr string) error {
cmdArgs := append(cx.PrefixArgs(), "role")
cmdArgs = append(cmdArgs, args...)
return spawnWithExpect(cmdArgs, expStr)
return spawnWithExpectWithEnv(cmdArgs, cx.envMap, expStr)
}
func ctlV3RoleGrantPermission(cx ctlCtx, rolename string, perm grantingPerm) error {
@ -160,7 +160,7 @@ func ctlV3RoleGrantPermission(cx ctlCtx, rolename string, perm grantingPerm) err
cmdArgs = append(cmdArgs, rolename)
cmdArgs = append(cmdArgs, grantingPermToArgs(perm)...)
proc, err := spawnCmd(cmdArgs)
proc, err := spawnCmd(cmdArgs, cx.envMap)
if err != nil {
return err
}
@ -186,7 +186,7 @@ func ctlV3RoleRevokePermission(cx ctlCtx, rolename string, key, rangeEnd string,
expStr = fmt.Sprintf("Permission of key %s is revoked from role %s", key, rolename)
}
proc, err := spawnCmd(cmdArgs)
proc, err := spawnCmd(cmdArgs, cx.envMap)
if err != nil {
return err
}

View File

@ -84,10 +84,11 @@ func snapshotCorruptTest(cx ctlCtx) {
datadir := cx.t.TempDir()
serr := spawnWithExpect(
serr := spawnWithExpectWithEnv(
append(cx.PrefixArgsUtl(), "snapshot", "restore",
"--data-dir", datadir,
fpath),
cx.envMap,
"expected sha256")
if serr != nil {
@ -117,10 +118,11 @@ func snapshotStatusBeforeRestoreTest(cx ctlCtx) {
dataDir := cx.t.TempDir()
defer os.RemoveAll(dataDir)
serr := spawnWithExpect(
serr := spawnWithExpectWithEnv(
append(cx.PrefixArgsUtl(), "snapshot", "restore",
"--data-dir", dataDir,
fpath),
cx.envMap,
"added member")
if serr != nil {
cx.t.Fatal(serr)
@ -129,13 +131,13 @@ func snapshotStatusBeforeRestoreTest(cx ctlCtx) {
func ctlV3SnapshotSave(cx ctlCtx, fpath string) error {
cmdArgs := append(cx.PrefixArgs(), "snapshot", "save", fpath)
return spawnWithExpect(cmdArgs, fmt.Sprintf("Snapshot saved at %s", fpath))
return spawnWithExpectWithEnv(cmdArgs, cx.envMap, fmt.Sprintf("Snapshot saved at %s", fpath))
}
func getSnapshotStatus(cx ctlCtx, fpath string) (snapshot.Status, error) {
cmdArgs := append(cx.PrefixArgsUtl(), "--write-out", "json", "snapshot", "status", fpath)
proc, err := spawnCmd(cmdArgs)
proc, err := spawnCmd(cmdArgs, nil)
if err != nil {
return snapshot.Status{}, err
}
@ -203,6 +205,7 @@ func testIssue6361(t *testing.T, etcdutl bool) {
t.Log("etcdctl saving snapshot...")
if err = spawnWithExpects(append(prefixArgs, "snapshot", "save", fpath),
nil,
fmt.Sprintf("Snapshot saved at %s", fpath),
); err != nil {
t.Fatal(err)
@ -264,7 +267,7 @@ func testIssue6361(t *testing.T, etcdutl bool) {
nepc, err = spawnCmd([]string{epc.procs[0].Config().execPath, "--name", name2,
"--listen-client-urls", clientURL, "--advertise-client-urls", clientURL,
"--listen-peer-urls", peerURL, "--initial-advertise-peer-urls", peerURL,
"--initial-cluster", initialCluster2, "--initial-cluster-state", "existing", "--data-dir", newDataDir2})
"--initial-cluster", initialCluster2, "--initial-cluster-state", "existing", "--data-dir", newDataDir2}, nil)
if err != nil {
t.Fatal(err)
}

View File

@ -104,7 +104,7 @@ func clusterVersionTest(cx ctlCtx, expected string) {
func ctlV3Version(cx ctlCtx) error {
cmdArgs := append(cx.PrefixArgs(), "version")
return spawnWithExpect(cmdArgs, version.Version)
return spawnWithExpectWithEnv(cmdArgs, cx.envMap, version.Version)
}
// TestCtlV3DialWithHTTPScheme ensures that client handles endpoints with HTTPS scheme.
@ -114,7 +114,7 @@ func TestCtlV3DialWithHTTPScheme(t *testing.T) {
func dialWithSchemeTest(cx ctlCtx) {
cmdArgs := append(cx.prefixArgs(cx.epc.EndpointsV3()), "put", "foo", "bar")
if err := spawnWithExpect(cmdArgs, "OK"); err != nil {
if err := spawnWithExpectWithEnv(cmdArgs, cx.envMap, "OK"); err != nil {
cx.t.Fatal(err)
}
}
@ -129,7 +129,7 @@ type ctlCtx struct {
epc *etcdProcessCluster
envMap map[string]struct{}
envMap map[string]string
dialTimeout time.Duration
@ -201,7 +201,7 @@ func withApiPrefix(p string) ctlOption {
}
func withFlagByEnv() ctlOption {
return func(cx *ctlCtx) { cx.envMap = make(map[string]struct{}) }
return func(cx *ctlCtx) { cx.envMap = make(map[string]string) }
}
func withEtcdutl() ctlOption {
@ -248,6 +248,7 @@ func testCtlWithOffline(t *testing.T, testFunc func(ctlCtx), testOfflineFunc fun
for k := range ret.envMap {
os.Unsetenv(k)
}
ret.envMap = make(map[string]string)
}
if ret.epc != nil {
if errC := ret.epc.Close(); errC != nil {
@ -311,8 +312,7 @@ func (cx *ctlCtx) prefixArgs(eps []string) []string {
for k, v := range fmap {
if useEnv {
ek := flags.FlagToEnv("ETCDCTL", k)
os.Setenv(ek, v)
cx.envMap[ek] = struct{}{}
cx.envMap[ek] = v
} else {
cmdArgs = append(cmdArgs, fmt.Sprintf("--%s=%s", k, v))
}

View File

@ -102,7 +102,7 @@ func ctlV3Txn(cx ctlCtx, rqs txnRequests) error {
if cx.interactive {
cmdArgs = append(cmdArgs, "--interactive")
}
proc, err := spawnCmd(cmdArgs)
proc, err := spawnCmd(cmdArgs, cx.envMap)
if err != nil {
return err
}

View File

@ -179,7 +179,7 @@ func ctlV3User(cx ctlCtx, args []string, expStr string, stdIn []string) error {
cmdArgs := append(cx.PrefixArgs(), "user")
cmdArgs = append(cmdArgs, args...)
proc, err := spawnCmd(cmdArgs)
proc, err := spawnCmd(cmdArgs, cx.envMap)
if err != nil {
return err
}

View File

@ -35,7 +35,7 @@ func setupWatchArgs(cx ctlCtx, args []string) []string {
func ctlV3Watch(cx ctlCtx, args []string, kvs ...kvExec) error {
cmdArgs := setupWatchArgs(cx, args)
proc, err := spawnCmd(cmdArgs)
proc, err := spawnCmd(cmdArgs, nil)
if err != nil {
return err
}
@ -66,7 +66,7 @@ func ctlV3Watch(cx ctlCtx, args []string, kvs ...kvExec) error {
func ctlV3WatchFailPerm(cx ctlCtx, args []string) error {
cmdArgs := setupWatchArgs(cx, args)
proc, err := spawnCmd(cmdArgs)
proc, err := spawnCmd(cmdArgs, nil)
if err != nil {
return err
}

View File

@ -29,7 +29,7 @@ const exampleConfigFile = "../../etcd.conf.yml.sample"
func TestEtcdExampleConfig(t *testing.T) {
skipInShortMode(t)
proc, err := spawnCmd([]string{binDir + "/etcd", "--config-file", exampleConfigFile})
proc, err := spawnCmd([]string{binDir + "/etcd", "--config-file", exampleConfigFile}, nil)
if err != nil {
t.Fatal(err)
}
@ -75,7 +75,7 @@ func TestEtcdMultiPeer(t *testing.T) {
"--initial-advertise-peer-urls", fmt.Sprintf("http://127.0.0.1:%d", etcdProcessBasePort+i),
"--initial-cluster", ic,
}
p, err := spawnCmd(args)
p, err := spawnCmd(args, nil)
if err != nil {
t.Fatal(err)
}
@ -106,7 +106,7 @@ func TestEtcdUnixPeers(t *testing.T) {
"--listen-peer-urls", "unix://etcd.unix:1",
"--initial-advertise-peer-urls", "unix://etcd.unix:1",
"--initial-cluster", "e1=unix://etcd.unix:1",
},
}, nil,
)
defer os.Remove("etcd.unix:1")
if err != nil {
@ -183,7 +183,7 @@ func TestEtcdPeerCNAuth(t *testing.T) {
commonArgs = append(commonArgs, args...)
p, err := spawnCmd(commonArgs)
p, err := spawnCmd(commonArgs, nil)
if err != nil {
t.Fatal(err)
}
@ -262,7 +262,7 @@ func TestEtcdPeerNameAuth(t *testing.T) {
commonArgs = append(commonArgs, args...)
p, err := spawnCmd(commonArgs)
p, err := spawnCmd(commonArgs, nil)
if err != nil {
t.Fatal(err)
}
@ -308,7 +308,7 @@ func TestGrpcproxyAndCommonName(t *testing.T) {
t.Errorf("Unexpected error: %s", err)
}
p, err := spawnCmd(argsWithEmptyCN)
p, err := spawnCmd(argsWithEmptyCN, nil)
defer func() {
if p != nil {
p.Stop()
@ -323,7 +323,7 @@ func TestGrpcproxyAndCommonName(t *testing.T) {
func TestBootstrapDefragFlag(t *testing.T) {
skipInShortMode(t)
proc, err := spawnCmd([]string{binDir + "/etcd", "--experimental-bootstrap-defrag-threshold-megabytes", "1000"})
proc, err := spawnCmd([]string{binDir + "/etcd", "--experimental-bootstrap-defrag-threshold-megabytes", "1000"}, nil)
if err != nil {
t.Fatal(err)
}

View File

@ -87,7 +87,7 @@ func corruptTest(cx ctlCtx) {
cx.t.Log("restarting etcd[0]")
ep := cx.epc.procs[0]
proc, err := spawnCmd(append([]string{ep.Config().execPath}, ep.Config().args...))
proc, err := spawnCmd(append([]string{ep.Config().execPath}, ep.Config().args...), cx.envMap)
if err != nil {
cx.t.Fatal(err)
}

View File

@ -43,6 +43,11 @@ type etcdProcess interface {
Close() error
WithStopSignal(sig os.Signal) os.Signal
Config() *etcdServerProcessConfig
Logs() logsExpect
}
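// logsExpect is the minimal log-reading surface of an etcd process: Expect
// blocks until a log line matching the given string is read, then returns it.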
type logsExpect interface {
Expect(string) (string, error)
}
type etcdServerProcess struct {
@ -56,6 +61,7 @@ type etcdServerProcessConfig struct {
execPath string
args []string
tlsArgs []string
envVars map[string]string
dataDirPath string
keepDataDir bool
@ -92,7 +98,7 @@ func (ep *etcdServerProcess) Start() error {
panic("already started")
}
ep.cfg.lg.Info("starting server...", zap.String("name", ep.cfg.name))
proc, err := spawnCmdWithLogger(ep.cfg.lg, append([]string{ep.cfg.execPath}, ep.cfg.args...))
proc, err := spawnCmdWithLogger(ep.cfg.lg, append([]string{ep.cfg.execPath}, ep.cfg.args...), ep.cfg.envVars)
if err != nil {
return err
}
@ -163,3 +169,10 @@ func (ep *etcdServerProcess) waitReady() error {
}
func (ep *etcdServerProcess) Config() *etcdServerProcessConfig { return ep.cfg }
func (ep *etcdServerProcess) Logs() logsExpect {
if ep.proc == nil {
ep.cfg.lg.Panic("Please grap logs before process is stopped")
}
return ep.proc
}

View File

@ -18,6 +18,7 @@
package e2e
import (
"fmt"
"os"
"strings"
@ -27,20 +28,41 @@ import (
const noOutputLineCount = 0 // regular binaries emit no extra lines
func spawnCmd(args []string) (*expect.ExpectProcess, error) {
return spawnCmdWithLogger(zap.NewNop(), args)
func spawnCmd(args []string, envVars map[string]string) (*expect.ExpectProcess, error) {
return spawnCmdWithLogger(zap.NewNop(), args, envVars)
}
func spawnCmdWithLogger(lg *zap.Logger, args []string) (*expect.ExpectProcess, error) {
func spawnCmdWithLogger(lg *zap.Logger, args []string, envVars map[string]string) (*expect.ExpectProcess, error) {
wd, err := os.Getwd()
if err != nil {
return nil, err
}
env := mergeEnvVariables(envVars)
if strings.HasSuffix(args[0], "/etcdctl3") {
env := append(os.Environ(), "ETCDCTL_API=3")
lg.Info("spawning process with ETCDCTL_API=3", zap.Strings("args", args), zap.String("working-dir", wd))
env = append(env, "ETCDCTL_API=3")
lg.Info("spawning process with ETCDCTL_API=3", zap.Strings("args", args), zap.String("working-dir", wd), zap.Strings("environment-variables", env))
return expect.NewExpectWithEnv(ctlBinPath, args[1:], env)
}
lg.Info("spawning process", zap.Strings("args", args), zap.String("working-dir", wd))
return expect.NewExpect(args[0], args[1:]...)
lg.Info("spawning process", zap.Strings("args", args), zap.String("working-dir", wd), zap.Strings("environment-variables", env))
return expect.NewExpectWithEnv(args[0], args[1:], env)
}
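// mergeEnvVariables combines the explicitly passed envVars with os.Environ(),
// giving the explicit values precedence on key collisions.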
func mergeEnvVariables(envVars map[string]string) []string {
var env []string
// Environment variables passed as parameters take priority
// over OS environment variables.
for k, v := range envVars {
env = append(env, fmt.Sprintf("%s=%s", k, v))
}
// Now append the OS environment variables that were not passed as parameters.
currVars := os.Environ()
for _, v := range currVars {
// SplitN keeps any '=' characters inside the value intact.
p := strings.SplitN(v, "=", 2)
if _, ok := envVars[p[0]]; !ok {
env = append(env, v)
}
}
return env
}
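A minimal usage sketch of the merge semantics (illustration only, not part of this diff): an explicitly passed variable shadows the inherited OS value, while every other os.Environ() entry is carried over unchanged.

os.Setenv("ETCDCTL_API", "2") // inherited from the OS environment
env := mergeEnvVariables(map[string]string{"ETCDCTL_API": "3"})
// env contains "ETCDCTL_API=3" exactly once; the inherited "ETCDCTL_API=2"
// entry is skipped because its key was already provided explicitly.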

View File

@ -40,16 +40,20 @@ func waitReadyExpectProc(exproc *expect.ExpectProcess, readyStrs []string) error
}
func spawnWithExpect(args []string, expected string) error {
return spawnWithExpects(args, []string{expected}...)
return spawnWithExpects(args, nil, []string{expected}...)
}
func spawnWithExpects(args []string, xs ...string) error {
_, err := spawnWithExpectLines(args, xs...)
func spawnWithExpectWithEnv(args []string, envVars map[string]string, expected string) error {
return spawnWithExpects(args, envVars, []string{expected}...)
}
func spawnWithExpects(args []string, envVars map[string]string, xs ...string) error {
_, err := spawnWithExpectLines(args, envVars, xs...)
return err
}
func spawnWithExpectLines(args []string, xs ...string) ([]string, error) {
proc, err := spawnCmd(args)
func spawnWithExpectLines(args []string, envVars map[string]string, xs ...string) ([]string, error) {
proc, err := spawnCmd(args, envVars)
if err != nil {
return nil, err
}

View File

@ -63,7 +63,7 @@ func assertVerifyCanStartV2deprecationNotYet(t testing.TB, dataDirPath string) {
func assertVerifyCannotStartV2deprecationWriteOnly(t testing.TB, dataDirPath string) {
t.Log("Verify its infeasible to start etcd with --v2-deprecation=write-only mode")
proc, err := spawnCmd([]string{binDir + "/etcd", "--v2-deprecation=write-only", "--data-dir=" + dataDirPath})
proc, err := spawnCmd([]string{binDir + "/etcd", "--v2-deprecation=write-only", "--data-dir=" + dataDirPath}, nil)
assert.NoError(t, err)
_, err = proc.Expect("detected disallowed custom content in v2store for stage --v2-deprecation=write-only")
@ -90,7 +90,7 @@ func TestV2Deprecation(t *testing.T) {
func TestV2DeprecationWriteOnlyNoV2Api(t *testing.T) {
BeforeTest(t)
proc, err := spawnCmd([]string{binDir + "/etcd", "--v2-deprecation=write-only", "--enable-v2"})
proc, err := spawnCmd([]string{binDir + "/etcd", "--v2-deprecation=write-only", "--enable-v2"}, nil)
assert.NoError(t, err)
_, err = proc.Expect("--enable-v2 and --v2-deprecation=write-only are mutually exclusive")

View File

@ -243,7 +243,7 @@ func testV3CurlAuth(cx ctlCtx) {
)
cmdArgs = cURLPrefixArgs(cx.epc, "POST", cURLReq{endpoint: path.Join(p, "/auth/authenticate"), value: string(authreq)})
proc, err := spawnCmd(cmdArgs)
proc, err := spawnCmd(cmdArgs, cx.envMap)
testutil.AssertNil(cx.t, err)
defer proc.Close()
@ -285,7 +285,7 @@ func testV3CurlCampaign(cx ctlCtx) {
endpoint: path.Join(cx.apiPrefix, "/election/campaign"),
value: string(cdata),
})
lines, err := spawnWithExpectLines(cargs, `"leader":{"name":"`)
lines, err := spawnWithExpectLines(cargs, cx.envMap, `"leader":{"name":"`)
if err != nil {
cx.t.Fatalf("failed post campaign request (%s) (%v)", cx.apiPrefix, err)
}

View File

@ -474,7 +474,7 @@ func (srv *Server) handle_INITIAL_START_ETCD(req *rpcpb.Request) (*rpcpb.Respons
}, nil
}
err := fileutil.TouchDirAll(srv.Member.BaseDir)
err := fileutil.TouchDirAll(srv.lg, srv.Member.BaseDir)
if err != nil {
return nil, err
}
@ -509,7 +509,7 @@ func (srv *Server) handle_INITIAL_START_ETCD(req *rpcpb.Request) (*rpcpb.Respons
func (srv *Server) handle_RESTART_ETCD(req *rpcpb.Request) (*rpcpb.Response, error) {
var err error
if !fileutil.Exist(srv.Member.BaseDir) {
err = fileutil.TouchDirAll(srv.Member.BaseDir)
err = fileutil.TouchDirAll(srv.lg, srv.Member.BaseDir)
if err != nil {
return nil, err
}
@ -580,7 +580,7 @@ func (srv *Server) handle_SIGQUIT_ETCD_AND_REMOVE_DATA() (*rpcpb.Response, error
// create a new log file for the next member restart
if !fileutil.Exist(srv.Member.BaseDir) {
err = fileutil.TouchDirAll(srv.Member.BaseDir)
err = fileutil.TouchDirAll(srv.lg, srv.Member.BaseDir)
if err != nil {
return nil, err
}
@ -652,6 +652,7 @@ func (srv *Server) handle_SIGQUIT_ETCD_AND_ARCHIVE_DATA() (*rpcpb.Response, erro
// TODO: support separate WAL directory
if err = archive(
srv.lg,
srv.Member.BaseDir,
srv.Member.Etcd.LogOutputs[0],
srv.Member.Etcd.DataDir,

View File

@ -25,15 +25,17 @@ import (
"time"
"go.etcd.io/etcd/client/pkg/v3/fileutil"
"go.uber.org/zap"
)
// TODO: support separate WAL directory
func archive(baseDir, etcdLogPath, dataDir string) error {
func archive(lg *zap.Logger, baseDir, etcdLogPath, dataDir string) error {
dir := filepath.Join(baseDir, "etcd-failure-archive", time.Now().Format(time.RFC3339))
if existDir(dir) {
dir = filepath.Join(baseDir, "etcd-failure-archive", time.Now().Add(time.Second).Format(time.RFC3339))
}
if err := fileutil.TouchDirAll(dir); err != nil {
if err := fileutil.TouchDirAll(lg, dir); err != nil {
return err
}

View File

@ -524,7 +524,7 @@ func (clus *Cluster) sendOpWithResp(idx int, op rpcpb.Operation) (*rpcpb.Respons
"fixtures",
"client",
)
if err = fileutil.TouchDirAll(dirClient); err != nil {
if err = fileutil.TouchDirAll(clus.lg, dirClient); err != nil {
return nil, err
}

View File

@ -38,7 +38,7 @@ func (clus *Cluster) Run(t *testing.T) {
// needs to obtain all the failpoints from the etcd member.
clus.updateCases()
if err := fileutil.TouchDirAll(clus.Tester.DataDir); err != nil {
if err := fileutil.TouchDirAll(clus.lg, clus.Tester.DataDir); err != nil {
clus.lg.Panic(
"failed to create test data directory",
zap.String("dir", clus.Tester.DataDir),

View File

@ -15,22 +15,22 @@
package integration
import (
"fmt"
"io"
"io/ioutil"
"net"
"sync"
"go.etcd.io/etcd/client/pkg/v3/transport"
)
// bridge creates a unix socket bridge to another unix socket, making it possible
type Dialer interface {
Dial() (net.Conn, error)
}
// bridge proxies connections between listener and dialer, making it possible
// to disconnect grpc network connections without closing the logical grpc connection.
type bridge struct {
inaddr string
outaddr string
l net.Listener
conns map[*bridgeConn]struct{}
dialer Dialer
l net.Listener
conns map[*bridgeConn]struct{}
stopc chan struct{}
pausec chan struct{}
@ -40,30 +40,22 @@ type bridge struct {
mu sync.Mutex
}
func newBridge(addr string) (*bridge, error) {
func newBridge(dialer Dialer, listener net.Listener) (*bridge, error) {
b := &bridge{
// bridge "port" is ("%05d%05d0", port, pid) since go1.8 expects the port to be a number
inaddr: addr + "0",
outaddr: addr,
dialer: dialer,
l: listener,
conns: make(map[*bridgeConn]struct{}),
stopc: make(chan struct{}),
pausec: make(chan struct{}),
blackholec: make(chan struct{}),
}
close(b.pausec)
l, err := transport.NewUnixListener(b.inaddr)
if err != nil {
return nil, fmt.Errorf("listen failed on socket %s (%v)", addr, err)
}
b.l = l
b.wg.Add(1)
go b.serveListen()
return b, nil
}
func (b *bridge) URL() string { return "unix://" + b.inaddr }
func (b *bridge) Close() {
b.l.Close()
b.mu.Lock()
@ -76,7 +68,7 @@ func (b *bridge) Close() {
b.wg.Wait()
}
func (b *bridge) Reset() {
func (b *bridge) DropConnections() {
b.mu.Lock()
defer b.mu.Unlock()
for bc := range b.conns {
@ -85,13 +77,13 @@ func (b *bridge) Reset() {
b.conns = make(map[*bridgeConn]struct{})
}
func (b *bridge) Pause() {
func (b *bridge) PauseConnections() {
b.mu.Lock()
b.pausec = make(chan struct{})
b.mu.Unlock()
}
func (b *bridge) Unpause() {
func (b *bridge) UnpauseConnections() {
b.mu.Lock()
select {
case <-b.pausec:
@ -127,7 +119,7 @@ func (b *bridge) serveListen() {
case <-pausec:
}
outc, oerr := net.Dial("unix", b.outaddr)
outc, oerr := b.dialer.Dial()
if oerr != nil {
inc.Close()
return
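With dialing now hidden behind the Dialer interface, the caller supplies the connection logic. A hypothetical unix-socket implementation (names assumed, not part of this diff) reproducing the previous inline net.Dial behavior:

type unixDialer struct{ addr string }

// Dial connects to the fixed unix socket, matching what serveListen
// previously did inline via net.Dial("unix", b.outaddr).
func (d unixDialer) Dial() (net.Conn, error) { return net.Dial("unix", d.addr) }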

View File

@ -38,10 +38,11 @@ func TestBalancerUnderBlackholeKeepAliveWatch(t *testing.T) {
clus := integration.NewClusterV3(t, &integration.ClusterConfig{
Size: 2,
GRPCKeepAliveMinTime: time.Millisecond, // avoid too_many_pings
UseBridge: true,
})
defer clus.Terminate(t)
eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr()}
eps := []string{clus.Members[0].GRPCURL(), clus.Members[1].GRPCURL()}
ccfg := clientv3.Config{
Endpoints: []string{eps[0]},
@ -76,7 +77,7 @@ func TestBalancerUnderBlackholeKeepAliveWatch(t *testing.T) {
// give enough time for balancer resolution
time.Sleep(5 * time.Second)
clus.Members[0].Blackhole()
clus.Members[0].Bridge().Blackhole()
if _, err = clus.Client(1).Put(context.TODO(), "foo", "bar"); err != nil {
t.Fatal(err)
@ -87,12 +88,12 @@ func TestBalancerUnderBlackholeKeepAliveWatch(t *testing.T) {
t.Error("took too long to receive watch events")
}
clus.Members[0].Unblackhole()
clus.Members[0].Bridge().Unblackhole()
// wait for eps[0] to move out of the unhealthy state so that it can be re-pinned.
time.Sleep(ccfg.DialTimeout)
clus.Members[1].Blackhole()
clus.Members[1].Bridge().Blackhole()
// make sure client[0] can connect to eps[0] after removing the blackhole.
if _, err = clus.Client(0).Get(context.TODO(), "foo"); err != nil {
@ -170,10 +171,11 @@ func testBalancerUnderBlackholeNoKeepAlive(t *testing.T, op func(*clientv3.Clien
clus := integration.NewClusterV3(t, &integration.ClusterConfig{
Size: 2,
SkipCreatingClient: true,
UseBridge: true,
})
defer clus.Terminate(t)
eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr()}
eps := []string{clus.Members[0].GRPCURL(), clus.Members[1].GRPCURL()}
ccfg := clientv3.Config{
Endpoints: []string{eps[0]},
@ -194,7 +196,7 @@ func testBalancerUnderBlackholeNoKeepAlive(t *testing.T, op func(*clientv3.Clien
cli.SetEndpoints(eps...)
// blackhole eps[0]
clus.Members[0].Blackhole()
clus.Members[0].Bridge().Blackhole()
// With the round-robin balancer, the client will reach a healthy endpoint
// within a few requests.

View File

@ -57,7 +57,7 @@ func TestDialTLSExpired(t *testing.T) {
}
// expect remote errors "tls: bad certificate"
_, err = integration.NewClient(t, clientv3.Config{
Endpoints: []string{clus.Members[0].GRPCAddr()},
Endpoints: []string{clus.Members[0].GRPCURL()},
DialTimeout: 3 * time.Second,
DialOptions: []grpc.DialOption{grpc.WithBlock()},
TLS: tls,
@ -75,7 +75,7 @@ func TestDialTLSNoConfig(t *testing.T) {
defer clus.Terminate(t)
// expect "signed by unknown authority"
c, err := integration.NewClient(t, clientv3.Config{
Endpoints: []string{clus.Members[0].GRPCAddr()},
Endpoints: []string{clus.Members[0].GRPCURL()},
DialTimeout: time.Second,
DialOptions: []grpc.DialOption{grpc.WithBlock()},
})
@ -108,7 +108,7 @@ func testDialSetEndpoints(t *testing.T, setBefore bool) {
// get endpoint list
eps := make([]string, 3)
for i := range eps {
eps[i] = clus.Members[i].GRPCAddr()
eps[i] = clus.Members[i].GRPCURL()
}
toKill := rand.Intn(len(eps))
@ -149,7 +149,7 @@ func TestSwitchSetEndpoints(t *testing.T) {
defer clus.Terminate(t)
// get the endpoints of the non-partitioned members
eps := []string{clus.Members[1].GRPCAddr(), clus.Members[2].GRPCAddr()}
eps := []string{clus.Members[1].GRPCURL(), clus.Members[2].GRPCURL()}
cli := clus.Client(0)
clus.Members[0].InjectPartition(t, clus.Members[1:]...)
@ -170,7 +170,7 @@ func TestRejectOldCluster(t *testing.T) {
defer clus.Terminate(t)
cfg := clientv3.Config{
Endpoints: []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr()},
Endpoints: []string{clus.Members[0].GRPCURL(), clus.Members[1].GRPCURL()},
DialTimeout: 5 * time.Second,
DialOptions: []grpc.DialOption{grpc.WithBlock()},
RejectOldCluster: true,
@ -212,7 +212,7 @@ func TestSetEndpointAndPut(t *testing.T) {
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2})
defer clus.Terminate(t)
clus.Client(1).SetEndpoints(clus.Members[0].GRPCAddr())
clus.Client(1).SetEndpoints(clus.Members[0].GRPCURL())
_, err := clus.Client(1).Put(context.TODO(), "foo", "bar")
if err != nil && !strings.Contains(err.Error(), "closing") {
t.Fatal(err)

View File

@ -111,7 +111,7 @@ func testBalancerUnderNetworkPartition(t *testing.T, op func(*clientv3.Client, c
})
defer clus.Terminate(t)
eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr(), clus.Members[2].GRPCAddr()}
eps := []string{clus.Members[0].GRPCURL(), clus.Members[1].GRPCURL(), clus.Members[2].GRPCURL()}
// expect pin eps[0]
ccfg := clientv3.Config{
@ -166,7 +166,7 @@ func TestBalancerUnderNetworkPartitionLinearizableGetLeaderElection(t *testing.T
SkipCreatingClient: true,
})
defer clus.Terminate(t)
eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr(), clus.Members[2].GRPCAddr()}
eps := []string{clus.Members[0].GRPCURL(), clus.Members[1].GRPCURL(), clus.Members[2].GRPCURL()}
lead := clus.WaitLeader(t)
@ -222,7 +222,7 @@ func testBalancerUnderNetworkPartitionWatch(t *testing.T, isolateLeader bool) {
})
defer clus.Terminate(t)
eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr(), clus.Members[2].GRPCAddr()}
eps := []string{clus.Members[0].GRPCURL(), clus.Members[1].GRPCURL(), clus.Members[2].GRPCURL()}
target := clus.WaitLeader(t)
if !isolateLeader {
@ -283,7 +283,7 @@ func TestDropReadUnderNetworkPartition(t *testing.T) {
defer clus.Terminate(t)
leaderIndex := clus.WaitLeader(t)
// get a follower endpoint
eps := []string{clus.Members[(leaderIndex+1)%3].GRPCAddr()}
eps := []string{clus.Members[(leaderIndex+1)%3].GRPCURL()}
ccfg := clientv3.Config{
Endpoints: eps,
DialTimeout: 10 * time.Second,
@ -301,7 +301,7 @@ func TestDropReadUnderNetworkPartition(t *testing.T) {
// add other endpoints for later endpoint switch
cli.SetEndpoints(eps...)
time.Sleep(time.Second * 2)
conn, err := cli.Dial(clus.Members[(leaderIndex+1)%3].GRPCAddr())
conn, err := cli.Dial(clus.Members[(leaderIndex+1)%3].GRPCURL())
if err != nil {
t.Fatal(err)
}

View File

@ -35,10 +35,11 @@ func TestBalancerUnderServerShutdownWatch(t *testing.T) {
clus := integration.NewClusterV3(t, &integration.ClusterConfig{
Size: 3,
SkipCreatingClient: true,
UseBridge: true,
})
defer clus.Terminate(t)
eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr(), clus.Members[2].GRPCAddr()}
eps := []string{clus.Members[0].GRPCURL(), clus.Members[1].GRPCURL(), clus.Members[2].GRPCURL()}
lead := clus.WaitLeader(t)
@ -150,7 +151,7 @@ func testBalancerUnderServerShutdownMutable(t *testing.T, op func(*clientv3.Clie
})
defer clus.Terminate(t)
eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr(), clus.Members[2].GRPCAddr()}
eps := []string{clus.Members[0].GRPCURL(), clus.Members[1].GRPCURL(), clus.Members[2].GRPCURL()}
// pin eps[0]
cli, err := integration.NewClient(t, clientv3.Config{Endpoints: []string{eps[0]}})
@ -208,7 +209,7 @@ func testBalancerUnderServerShutdownImmutable(t *testing.T, op func(*clientv3.Cl
})
defer clus.Terminate(t)
eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr(), clus.Members[2].GRPCAddr()}
eps := []string{clus.Members[0].GRPCURL(), clus.Members[1].GRPCURL(), clus.Members[2].GRPCURL()}
// pin eps[0]
cli, err := integration.NewClient(t, clientv3.Config{Endpoints: []string{eps[0]}})
@ -278,6 +279,7 @@ func testBalancerUnderServerStopInflightRangeOnRestart(t *testing.T, linearizabl
cfg := &integration.ClusterConfig{
Size: 2,
SkipCreatingClient: true,
UseBridge: true,
}
if linearizable {
cfg.Size = 3
@ -285,9 +287,9 @@ func testBalancerUnderServerStopInflightRangeOnRestart(t *testing.T, linearizabl
clus := integration.NewClusterV3(t, cfg)
defer clus.Terminate(t)
eps := []string{clus.Members[0].GRPCAddr(), clus.Members[1].GRPCAddr()}
eps := []string{clus.Members[0].GRPCURL(), clus.Members[1].GRPCURL()}
if linearizable {
eps = append(eps, clus.Members[2].GRPCAddr())
eps = append(eps, clus.Members[2].GRPCURL())
}
lead := clus.WaitLeader(t)

View File

@ -712,7 +712,7 @@ func TestKVGetRetry(t *testing.T) {
integration.BeforeTest(t)
clusterSize := 3
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: clusterSize})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: clusterSize, UseBridge: true})
defer clus.Terminate(t)
// because killing leader and following election
@ -765,7 +765,7 @@ func TestKVGetRetry(t *testing.T) {
func TestKVPutFailGetRetry(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)
kv := clus.Client(0)
@ -876,7 +876,7 @@ func TestKVPutStoppedServerAndClose(t *testing.T) {
// in the presence of network errors.
func TestKVPutAtMostOnce(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)
if _, err := clus.Client(0).Put(context.TODO(), "k", "1"); err != nil {
@ -884,12 +884,12 @@ func TestKVPutAtMostOnce(t *testing.T) {
}
for i := 0; i < 10; i++ {
clus.Members[0].DropConnections()
clus.Members[0].Bridge().DropConnections()
donec := make(chan struct{})
go func() {
defer close(donec)
for i := 0; i < 10; i++ {
clus.Members[0].DropConnections()
clus.Members[0].Bridge().DropConnections()
time.Sleep(5 * time.Millisecond)
}
}()
@ -1027,7 +1027,7 @@ func TestKVForLearner(t *testing.T) {
// 1. clus.Members[3] is the newly added learner member, which was appended to clus.Members
// 2. we are using the member's grpcAddr instead of clientURLs as the endpoint for clientv3.Config,
// because the integration test implementation has diverged from embed/etcd.go.
learnerEp := clus.Members[3].GRPCAddr()
learnerEp := clus.Members[3].GRPCURL()
cfg := clientv3.Config{
Endpoints: []string{learnerEp},
DialTimeout: 5 * time.Second,
@ -1100,7 +1100,7 @@ func TestBalancerSupportLearner(t *testing.T) {
}
// clus.Members[3] is the newly added learner member, which was appended to clus.Members
learnerEp := clus.Members[3].GRPCAddr()
learnerEp := clus.Members[3].GRPCURL()
cfg := clientv3.Config{
Endpoints: []string{learnerEp},
DialTimeout: 5 * time.Second,
@ -1120,7 +1120,7 @@ func TestBalancerSupportLearner(t *testing.T) {
}
t.Logf("Expected: Read from learner error: %v", err)
eps := []string{learnerEp, clus.Members[0].GRPCAddr()}
eps := []string{learnerEp, clus.Members[0].GRPCURL()}
cli.SetEndpoints(eps...)
if _, err := cli.Get(context.Background(), "foo"); err != nil {
t.Errorf("expect no error (balancer should retry when request to learner fails), got error: %v", err)

View File

@ -190,7 +190,7 @@ func TestLeaseKeepAliveHandleFailure(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)
// TODO: change this line to get a cluster client
@ -416,7 +416,7 @@ func TestLeaseRevokeNewAfterClose(t *testing.T) {
func TestLeaseKeepAliveCloseAfterDisconnectRevoke(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)
cli := clus.Client(0)
@ -462,7 +462,7 @@ func TestLeaseKeepAliveCloseAfterDisconnectRevoke(t *testing.T) {
func TestLeaseKeepAliveInitTimeout(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)
cli := clus.Client(0)
@ -495,7 +495,7 @@ func TestLeaseKeepAliveInitTimeout(t *testing.T) {
func TestLeaseKeepAliveTTLTimeout(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)
cli := clus.Client(0)
@ -530,7 +530,7 @@ func TestLeaseKeepAliveTTLTimeout(t *testing.T) {
func TestLeaseTimeToLive(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)
c := clus.RandClient()
@ -656,7 +656,7 @@ func TestLeaseLeases(t *testing.T) {
func TestLeaseRenewLostQuorum(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)
cli := clus.Client(0)
@ -728,7 +728,7 @@ func TestLeaseKeepAliveLoopExit(t *testing.T) {
// transient cluster failure.
func TestV3LeaseFailureOverlap(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2, UseBridge: true})
defer clus.Terminate(t)
numReqs := 5
@ -782,7 +782,7 @@ func TestV3LeaseFailureOverlap(t *testing.T) {
func TestLeaseWithRequireLeader(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2, UseBridge: true})
defer clus.Terminate(t)
c := clus.Client(0)

View File

@ -195,7 +195,7 @@ func TestLeasingPutInvalidateExisting(t *testing.T) {
// TestLeasingGetNoLeaseTTL checks that a key with a TTL is not leased.
func TestLeasingGetNoLeaseTTL(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)
lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
@ -224,7 +224,7 @@ func TestLeasingGetNoLeaseTTL(t *testing.T) {
// when the etcd cluster is partitioned.
func TestLeasingGetSerializable(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2, UseBridge: true})
defer clus.Terminate(t)
lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
@ -326,7 +326,7 @@ func TestLeasingRevGet(t *testing.T) {
// TestLeasingGetWithOpts checks that options served through the cache do not depend on the server.
func TestLeasingGetWithOpts(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)
lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
@ -418,7 +418,7 @@ func TestLeasingConcurrentPut(t *testing.T) {
func TestLeasingDisconnectedGet(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)
lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
@ -550,7 +550,7 @@ func TestLeasingOverwriteResponse(t *testing.T) {
func TestLeasingOwnerPutResponse(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)
lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
@ -617,7 +617,7 @@ func TestLeasingTxnOwnerGetRange(t *testing.T) {
func TestLeasingTxnOwnerGet(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)
client := clus.Client(0)
@ -773,7 +773,7 @@ func TestLeasingTxnOwnerDelete(t *testing.T) {
func TestLeasingTxnOwnerIf(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)
lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
@ -867,7 +867,7 @@ func TestLeasingTxnOwnerIf(t *testing.T) {
func TestLeasingTxnCancel(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)
lkv1, closeLKV1, err := leasing.NewKV(clus.Client(0), "pfx/")
@ -1085,7 +1085,7 @@ func TestLeasingTxnRandIfThenOrElse(t *testing.T) {
func TestLeasingOwnerPutError(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)
lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
@ -1106,7 +1106,7 @@ func TestLeasingOwnerPutError(t *testing.T) {
func TestLeasingOwnerDeleteError(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)
lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
@ -1127,7 +1127,7 @@ func TestLeasingOwnerDeleteError(t *testing.T) {
func TestLeasingNonOwnerPutError(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)
lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
@ -1201,7 +1201,7 @@ func testLeasingOwnerDelete(t *testing.T, del clientv3.Op) {
func TestLeasingDeleteRangeBounds(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)
delkv, closeDelKV, err := leasing.NewKV(clus.Client(0), "0/")
@ -1376,7 +1376,7 @@ func TestLeasingPutGetDeleteConcurrent(t *testing.T) {
// disconnected when trying to submit revoke txn.
func TestLeasingReconnectOwnerRevoke(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)
lkv1, closeLKV1, err1 := leasing.NewKV(clus.Client(0), "foo/")
@ -1437,7 +1437,7 @@ func TestLeasingReconnectOwnerRevoke(t *testing.T) {
// disconnected and the watch is compacted.
func TestLeasingReconnectOwnerRevokeCompact(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)
lkv1, closeLKV1, err1 := leasing.NewKV(clus.Client(0), "foo/")
@ -1490,7 +1490,7 @@ func TestLeasingReconnectOwnerRevokeCompact(t *testing.T) {
// not cause inconsistency between the server and the client.
func TestLeasingReconnectOwnerConsistency(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)
lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/")
@ -1510,11 +1510,11 @@ func TestLeasingReconnectOwnerConsistency(t *testing.T) {
for i := 0; i < 10; i++ {
v := fmt.Sprintf("%d", i)
donec := make(chan struct{})
clus.Members[0].DropConnections()
clus.Members[0].Bridge().DropConnections()
go func() {
defer close(donec)
for i := 0; i < 20; i++ {
clus.Members[0].DropConnections()
clus.Members[0].Bridge().DropConnections()
time.Sleep(time.Millisecond)
}
}()
@ -1650,7 +1650,7 @@ func TestLeasingTxnAtomicCache(t *testing.T) {
// TestLeasingReconnectTxn checks that Txn is resilient to disconnects.
func TestLeasingReconnectTxn(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)
lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/")
@ -1664,9 +1664,9 @@ func TestLeasingReconnectTxn(t *testing.T) {
donec := make(chan struct{})
go func() {
defer close(donec)
clus.Members[0].DropConnections()
clus.Members[0].Bridge().DropConnections()
for i := 0; i < 10; i++ {
clus.Members[0].DropConnections()
clus.Members[0].Bridge().DropConnections()
time.Sleep(time.Millisecond)
}
time.Sleep(10 * time.Millisecond)
@ -1686,7 +1686,7 @@ func TestLeasingReconnectTxn(t *testing.T) {
// not cause inconsistency between the server and the client.
func TestLeasingReconnectNonOwnerGet(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)
lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/")
@ -1704,11 +1704,11 @@ func TestLeasingReconnectNonOwnerGet(t *testing.T) {
n := 0
for i := 0; i < 10; i++ {
donec := make(chan struct{})
clus.Members[0].DropConnections()
clus.Members[0].Bridge().DropConnections()
go func() {
defer close(donec)
for j := 0; j < 10; j++ {
clus.Members[0].DropConnections()
clus.Members[0].Bridge().DropConnections()
time.Sleep(time.Millisecond)
}
}()
@ -1814,7 +1814,7 @@ func TestLeasingDo(t *testing.T) {
func TestLeasingTxnOwnerPutBranch(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)
lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/")
@ -1908,7 +1908,7 @@ func randCmps(pfx string, dat []*clientv3.PutResponse) (cmps []clientv3.Cmp, the
func TestLeasingSessionExpire(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)
lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/", concurrency.WithTTL(1))
@ -1984,7 +1984,7 @@ func TestLeasingSessionExpireCancel(t *testing.T) {
for i := range tests {
t.Run(fmt.Sprintf("test %d", i), func(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)
lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "foo/", concurrency.WithTTL(1))

View File

@ -56,7 +56,7 @@ func TestMaintenanceHashKV(t *testing.T) {
if _, err := cli.Get(context.TODO(), "foo"); err != nil {
t.Fatal(err)
}
hresp, err := cli.HashKV(context.Background(), clus.Members[i].GRPCAddr(), 0)
hresp, err := cli.HashKV(context.Background(), clus.Members[i].GRPCURL(), 0)
if err != nil {
t.Fatal(err)
}
@ -192,7 +192,7 @@ func TestMaintenanceSnapshotErrorInflight(t *testing.T) {
func testMaintenanceSnapshotErrorInflight(t *testing.T, snapshot func(context.Context, *clientv3.Client) (io.ReadCloser, error)) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)
// take about 1 second to read the snapshot
@ -279,7 +279,7 @@ func TestMaintenanceStatus(t *testing.T) {
eps := make([]string, 3)
for i := 0; i < 3; i++ {
eps[i] = clus.Members[i].GRPCAddr()
eps[i] = clus.Members[i].GRPCURL()
}
cli, err := integration.NewClient(t, clientv3.Config{Endpoints: eps, DialOptions: []grpc.DialOption{grpc.WithBlock()}})

View File

@ -75,7 +75,7 @@ func TestV3ClientMetrics(t *testing.T) {
defer clus.Terminate(t)
cfg := clientv3.Config{
Endpoints: []string{clus.Members[0].GRPCAddr()},
Endpoints: []string{clus.Members[0].GRPCURL()},
DialOptions: []grpc.DialOption{
grpc.WithUnaryInterceptor(grpcprom.UnaryClientInterceptor),
grpc.WithStreamInterceptor(grpcprom.StreamClientInterceptor),

View File

@ -30,14 +30,14 @@ func TestDetectKvOrderViolation(t *testing.T) {
var errOrderViolation = errors.New("DetectedOrderViolation")
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)
cfg := clientv3.Config{
Endpoints: []string{
clus.Members[0].GRPCAddr(),
clus.Members[1].GRPCAddr(),
clus.Members[2].GRPCAddr(),
clus.Members[0].GRPCURL(),
clus.Members[1].GRPCURL(),
clus.Members[2].GRPCURL(),
},
}
cli, err := integration.NewClient(t, cfg)
@ -82,7 +82,7 @@ func TestDetectKvOrderViolation(t *testing.T) {
clus.Members[1].Stop(t)
assert.NoError(t, clus.Members[2].Restart(t))
// force OrderingKv to query the third member
cli.SetEndpoints(clus.Members[2].GRPCAddr())
cli.SetEndpoints(clus.Members[2].GRPCURL())
time.Sleep(2 * time.Second) // FIXME: Figure out how to pause SetEndpoints sufficiently so that this is not needed
t.Logf("Quering m2 after restart")
@ -97,14 +97,14 @@ func TestDetectTxnOrderViolation(t *testing.T) {
var errOrderViolation = errors.New("DetectedOrderViolation")
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)
cfg := clientv3.Config{
Endpoints: []string{
clus.Members[0].GRPCAddr(),
clus.Members[1].GRPCAddr(),
clus.Members[2].GRPCAddr(),
clus.Members[0].GRPCURL(),
clus.Members[1].GRPCURL(),
clus.Members[2].GRPCURL(),
},
}
cli, err := integration.NewClient(t, cfg)
@ -151,7 +151,7 @@ func TestDetectTxnOrderViolation(t *testing.T) {
clus.Members[1].Stop(t)
assert.NoError(t, clus.Members[2].Restart(t))
// force OrderingKv to query the third member
cli.SetEndpoints(clus.Members[2].GRPCAddr())
cli.SetEndpoints(clus.Members[2].GRPCURL())
time.Sleep(2 * time.Second) // FIXME: Figure out how to pause SetEndpoints sufficiently so that this is not needed
_, err = orderingKv.Get(ctx, "foo", clientv3.WithSerializable())
if err != errOrderViolation {

View File

@ -29,11 +29,11 @@ func TestEndpointSwitchResolvesViolation(t *testing.T) {
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
defer clus.Terminate(t)
eps := []string{
clus.Members[0].GRPCAddr(),
clus.Members[1].GRPCAddr(),
clus.Members[2].GRPCAddr(),
clus.Members[0].GRPCURL(),
clus.Members[1].GRPCURL(),
clus.Members[2].GRPCURL(),
}
cfg := clientv3.Config{Endpoints: []string{clus.Members[0].GRPCAddr()}}
cfg := clientv3.Config{Endpoints: []string{clus.Members[0].GRPCURL()}}
cli, err := integration.NewClient(t, cfg)
if err != nil {
t.Fatal(err)
@ -71,7 +71,7 @@ func TestEndpointSwitchResolvesViolation(t *testing.T) {
}
t.Logf("Reconfigure client to speak only to the 'partitioned' member")
cli.SetEndpoints(clus.Members[2].GRPCAddr())
cli.SetEndpoints(clus.Members[2].GRPCURL())
_, err = orderingKv.Get(ctx, "foo", clientv3.WithSerializable())
if err != ordering.ErrNoGreaterRev {
t.Fatal("While speaking to partitioned leader, we should get ErrNoGreaterRev error")
@ -80,15 +80,15 @@ func TestEndpointSwitchResolvesViolation(t *testing.T) {
func TestUnresolvableOrderViolation(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 5, SkipCreatingClient: true})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 5, SkipCreatingClient: true, UseBridge: true})
defer clus.Terminate(t)
cfg := clientv3.Config{
Endpoints: []string{
clus.Members[0].GRPCAddr(),
clus.Members[1].GRPCAddr(),
clus.Members[2].GRPCAddr(),
clus.Members[3].GRPCAddr(),
clus.Members[4].GRPCAddr(),
clus.Members[0].GRPCURL(),
clus.Members[1].GRPCURL(),
clus.Members[2].GRPCURL(),
clus.Members[3].GRPCURL(),
clus.Members[4].GRPCURL(),
},
}
cli, err := integration.NewClient(t, cfg)
@ -99,7 +99,7 @@ func TestUnresolvableOrderViolation(t *testing.T) {
eps := cli.Endpoints()
ctx := context.TODO()
cli.SetEndpoints(clus.Members[0].GRPCAddr())
cli.SetEndpoints(clus.Members[0].GRPCURL())
time.Sleep(1 * time.Second)
_, err = cli.Put(ctx, "foo", "bar")
if err != nil {
@ -139,7 +139,7 @@ func TestUnresolvableOrderViolation(t *testing.T) {
t.Fatal(err)
}
clus.Members[3].WaitStarted(t)
cli.SetEndpoints(clus.Members[3].GRPCAddr())
cli.SetEndpoints(clus.Members[3].GRPCURL())
_, err = OrderingKv.Get(ctx, "foo", clientv3.WithSerializable())
if err != ordering.ErrNoGreaterRev {

View File

@ -53,7 +53,7 @@ func TestTxnError(t *testing.T) {
func TestTxnWriteFail(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)
kv := clus.Client(0)
@ -103,7 +103,7 @@ func TestTxnReadRetry(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)
kv := clus.Client(0)

View File

@ -47,7 +47,7 @@ type watchctx struct {
func runWatchTest(t *testing.T, f watcherTest) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)
wclientMember := rand.Intn(3)
@ -188,7 +188,7 @@ func testWatchReconnRequest(t *testing.T, wctx *watchctx) {
defer close(donec)
// take down watcher connection
for {
wctx.clus.Members[wctx.wclientMember].DropConnections()
wctx.clus.Members[wctx.wclientMember].Bridge().DropConnections()
select {
case <-timer:
// spinning on close may live lock reconnection
@ -230,7 +230,7 @@ func testWatchReconnInit(t *testing.T, wctx *watchctx) {
if wctx.ch = wctx.w.Watch(context.TODO(), "a"); wctx.ch == nil {
t.Fatalf("expected non-nil channel")
}
wctx.clus.Members[wctx.wclientMember].DropConnections()
wctx.clus.Members[wctx.wclientMember].Bridge().DropConnections()
// watcher should recover
putAndWatch(t, wctx, "a", "a")
}
@ -247,7 +247,7 @@ func testWatchReconnRunning(t *testing.T, wctx *watchctx) {
}
putAndWatch(t, wctx, "a", "a")
// take down watcher connection
wctx.clus.Members[wctx.wclientMember].DropConnections()
wctx.clus.Members[wctx.wclientMember].Bridge().DropConnections()
// watcher should recover
putAndWatch(t, wctx, "a", "b")
}
@ -348,7 +348,7 @@ func putAndWatch(t *testing.T, wctx *watchctx, key, val string) {
func TestWatchResumeInitRev(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)
cli := clus.Client(0)
@ -368,8 +368,8 @@ func TestWatchResumeInitRev(t *testing.T) {
t.Fatalf("got (%v, %v), expected create notification rev=4", resp, ok)
}
// pause wch
clus.Members[0].DropConnections()
clus.Members[0].PauseConnections()
clus.Members[0].Bridge().DropConnections()
clus.Members[0].Bridge().PauseConnections()
select {
case resp, ok := <-wch:
@ -378,7 +378,7 @@ func TestWatchResumeInitRev(t *testing.T) {
}
// resume wch
clus.Members[0].UnpauseConnections()
clus.Members[0].Bridge().UnpauseConnections()
select {
case resp, ok := <-wch:
@ -404,7 +404,7 @@ func TestWatchResumeInitRev(t *testing.T) {
func TestWatchResumeCompacted(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)
// create a waiting watcher at rev 1
@ -955,7 +955,7 @@ func TestWatchWithCreatedNotification(t *testing.T) {
func TestWatchWithCreatedNotificationDropConn(t *testing.T) {
integration.BeforeTest(t)
cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
defer cluster.Terminate(t)
client := cluster.RandClient()
@ -968,7 +968,7 @@ func TestWatchWithCreatedNotificationDropConn(t *testing.T) {
t.Fatalf("expected created event, got %v", resp)
}
cluster.Members[0].DropConnections()
cluster.Members[0].Bridge().DropConnections()
// check watch channel doesn't post another watch response.
select {
@ -1056,14 +1056,14 @@ func TestWatchOverlapContextCancel(t *testing.T) {
func TestWatchOverlapDropConnContextCancel(t *testing.T) {
f := func(clus *integration.ClusterV3) {
clus.Members[0].DropConnections()
clus.Members[0].Bridge().DropConnections()
}
testWatchOverlapContextCancel(t, f)
}
func testWatchOverlapContextCancel(t *testing.T, f func(*integration.ClusterV3)) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)
n := 100
@ -1154,7 +1154,7 @@ func TestWatchCancelAndCloseClient(t *testing.T) {
// then closes the watcher interface to ensure correct cleanup.
func TestWatchStressResumeClose(t *testing.T) {
integration.BeforeTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)
cli := clus.Client(0)
@ -1164,7 +1164,7 @@ func TestWatchStressResumeClose(t *testing.T) {
for i := range wchs {
wchs[i] = cli.Watch(ctx, "abc")
}
clus.Members[0].DropConnections()
clus.Members[0].Bridge().DropConnections()
cancel()
if err := cli.Close(); err != nil {
t.Fatal(err)

View File

@ -39,6 +39,7 @@ import (
"go.etcd.io/etcd/client/pkg/v3/types"
"go.etcd.io/etcd/client/v2"
"go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/pkg/v3/grpc_testing"
"go.etcd.io/etcd/raft/v3"
"go.etcd.io/etcd/server/v3/config"
"go.etcd.io/etcd/server/v3/embed"
@ -73,6 +74,7 @@ const (
basePort = 21000
URLScheme = "unix"
URLSchemeTLS = "unixs"
baseGRPCPort = 30000
)
var (
@ -121,6 +123,10 @@ var (
defaultTokenJWT = fmt.Sprintf("jwt,pub-key=%s,priv-key=%s,sign-method=RS256,ttl=1s",
MustAbsPath("../fixtures/server.crt"), MustAbsPath("../fixtures/server.key.insecure"))
// uniqueNumber is used to generate unique port numbers
// Should only be accessed via atomic package methods.
uniqueNumber int32
)
type ClusterConfig struct {
@ -153,6 +159,11 @@ type ClusterConfig struct {
// UseIP is true to use only IP for gRPC requests.
UseIP bool
// UseBridge adds a bridge between the client and the grpc server. It should be used in tests that
// want to manipulate connections or that require connections to survive a server stop/restart.
UseBridge bool
// UseTCP configures the server to listen on a tcp socket. If disabled, a unix socket is used.
UseTCP bool
EnableLeaseCheckpoint bool
LeaseCheckpointInterval time.Duration
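For reference, a minimal sketch of how a test opts into the bridge, mirroring the call sites updated throughout this diff:

clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)
// With UseBridge set, Bridge() returns the proxy that sits between the
// client and the member's grpc server; without it, Bridge() panics.
clus.Members[0].Bridge().DropConnections()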
@ -208,7 +219,7 @@ func newCluster(t testutil.TB, cfg *ClusterConfig) *cluster {
c := &cluster{cfg: cfg}
ms := make([]*member, cfg.Size)
for i := 0; i < cfg.Size; i++ {
ms[i] = c.mustNewMember(t)
ms[i] = c.mustNewMember(t, int64(i))
}
c.Members = ms
if err := c.fillClusterForMembers(); err != nil {
@ -249,7 +260,7 @@ func (c *cluster) Launch(t testutil.TB) {
c.waitMembersMatch(t, c.HTTPMembers())
c.waitVersion()
for _, m := range c.Members {
t.Logf(" - %v -> %v (%v)", m.Name, m.ID(), m.GRPCAddr())
t.Logf(" - %v -> %v (%v)", m.Name, m.ID(), m.GRPCURL())
}
}
@ -295,10 +306,11 @@ func (c *cluster) HTTPMembers() []client.Member {
return ms
}
func (c *cluster) mustNewMember(t testutil.TB) *member {
func (c *cluster) mustNewMember(t testutil.TB, memberNumber int64) *member {
m := mustNewMember(t,
memberConfig{
name: c.generateMemberName(),
memberNumber: memberNumber,
authToken: c.cfg.AuthToken,
peerTLS: c.cfg.PeerTLS,
clientTLS: c.cfg.ClientTLS,
@ -313,6 +325,8 @@ func (c *cluster) mustNewMember(t testutil.TB) *member {
clientMaxCallSendMsgSize: c.cfg.ClientMaxCallSendMsgSize,
clientMaxCallRecvMsgSize: c.cfg.ClientMaxCallRecvMsgSize,
useIP: c.cfg.UseIP,
useBridge: c.cfg.UseBridge,
useTCP: c.cfg.UseTCP,
enableLeaseCheckpoint: c.cfg.EnableLeaseCheckpoint,
leaseCheckpointInterval: c.cfg.LeaseCheckpointInterval,
WatchProgressNotifyInterval: c.cfg.WatchProgressNotifyInterval,
@ -328,7 +342,7 @@ func (c *cluster) mustNewMember(t testutil.TB) *member {
// addMember return PeerURLs of the added member.
func (c *cluster) addMember(t testutil.TB) types.URLs {
m := c.mustNewMember(t)
m := c.mustNewMember(t, 0)
scheme := schemeFromTLSInfo(c.cfg.PeerTLS)
@ -557,6 +571,8 @@ func NewListenerWithAddr(t testutil.TB, addr string) net.Listener {
type member struct {
config.ServerConfig
UniqNumber int64
MemberNumber int64
PeerListeners, ClientListeners []net.Listener
grpcListener net.Listener
// PeerTLSInfo enables peer TLS when set
@ -572,7 +588,7 @@ type member struct {
grpcServerOpts []grpc.ServerOption
grpcServer *grpc.Server
grpcServerPeer *grpc.Server
grpcAddr string
grpcURL string
grpcBridge *bridge
// serverClient is a clientv3 that directly calls the etcdserver.
@ -582,15 +598,21 @@ type member struct {
clientMaxCallSendMsgSize int
clientMaxCallRecvMsgSize int
useIP bool
useBridge bool
useTCP bool
isLearner bool
closed bool
grpcServerRecorder *grpc_testing.GrpcRecorder
}
func (m *member) GRPCAddr() string { return m.grpcAddr }
func (m *member) GRPCURL() string { return m.grpcURL }
type memberConfig struct {
name string
uniqNumber int64
memberNumber int64
peerTLS *transport.TLSInfo
clientTLS *transport.TLSInfo
authToken string
@ -605,6 +627,8 @@ type memberConfig struct {
clientMaxCallSendMsgSize int
clientMaxCallRecvMsgSize int
useIP bool
useBridge bool
useTCP bool
enableLeaseCheckpoint bool
leaseCheckpointInterval time.Duration
WatchProgressNotifyInterval time.Duration
@ -614,7 +638,10 @@ type memberConfig struct {
// set, it will use https scheme to communicate between peers.
func mustNewMember(t testutil.TB, mcfg memberConfig) *member {
var err error
m := &member{}
m := &member{
MemberNumber: mcfg.memberNumber,
UniqNumber: atomic.AddInt64(&localListenCount, 1),
}
peerScheme := schemeFromTLSInfo(mcfg.peerTLS)
clientScheme := schemeFromTLSInfo(mcfg.clientTLS)
@ -698,6 +725,8 @@ func mustNewMember(t testutil.TB, mcfg memberConfig) *member {
m.clientMaxCallSendMsgSize = mcfg.clientMaxCallSendMsgSize
m.clientMaxCallRecvMsgSize = mcfg.clientMaxCallRecvMsgSize
m.useIP = mcfg.useIP
m.useBridge = mcfg.useBridge
m.useTCP = mcfg.useTCP
m.EnableLeaseCheckpoint = mcfg.enableLeaseCheckpoint
m.LeaseCheckpointInterval = mcfg.leaseCheckpointInterval
@ -708,7 +737,7 @@ func mustNewMember(t testutil.TB, mcfg memberConfig) *member {
m.WarningUnaryRequestDuration = embed.DefaultWarningUnaryRequestDuration
m.V2Deprecation = config.V2_DEPR_DEFAULT
m.grpcServerRecorder = &grpc_testing.GrpcRecorder{}
m.Logger = memberLogger(t, mcfg.name)
t.Cleanup(func() {
// if we didn't cleanup the logger, the consecutive test
@ -731,45 +760,109 @@ func memberLogger(t testutil.TB, name string) *zap.Logger {
// listenGRPC starts a grpc server over a unix domain socket (or a tcp socket when useTCP is set) on the member
func (m *member) listenGRPC() error {
// prefix with localhost so cert has right domain
m.grpcAddr = "localhost:" + m.Name
m.Logger.Info("LISTEN GRPC", zap.String("m.grpcAddr", m.grpcAddr), zap.String("m.Name", m.Name))
if m.useIP { // for IP-only TLS certs
m.grpcAddr = "127.0.0.1:" + m.Name
}
l, err := transport.NewUnixListener(m.grpcAddr)
network, host, port := m.grpcAddr()
grpcAddr := host + ":" + port
m.Logger.Info("LISTEN GRPC", zap.String("grpcAddr", grpcAddr), zap.String("m.Name", m.Name))
grpcListener, err := net.Listen(network, grpcAddr)
if err != nil {
return fmt.Errorf("listen failed on grpc socket %s (%v)", m.grpcAddr, err)
return fmt.Errorf("listen failed on grpc socket %s (%v)", grpcAddr, err)
}
m.grpcBridge, err = newBridge(m.grpcAddr)
if err != nil {
l.Close()
return err
m.grpcURL = fmt.Sprintf("%s://%s", m.clientScheme(), grpcAddr)
if m.useBridge {
_, err = m.addBridge()
if err != nil {
grpcListener.Close()
return err
}
}
m.grpcAddr = schemeFromTLSInfo(m.ClientTLSInfo) + "://" + m.grpcBridge.inaddr
m.grpcListener = l
m.grpcListener = grpcListener
return nil
}
func (m *member) clientScheme() string {
switch {
case m.useTCP && m.ClientTLSInfo != nil:
return "https"
case m.useTCP && m.ClientTLSInfo == nil:
return "http"
case !m.useTCP && m.ClientTLSInfo != nil:
return "unixs"
case !m.useTCP && m.ClientTLSInfo == nil:
return "unix"
}
m.Logger.Panic("Failed to determine client schema")
return ""
}
func (m *member) addBridge() (*bridge, error) {
network, host, port := m.grpcAddr()
grpcAddr := host + ":" + port
bridgeAddr := grpcAddr + "0" // suffix the address so the bridge socket does not collide with the grpc listener
m.Logger.Info("LISTEN BRIDGE", zap.String("grpc-address", bridgeAddr), zap.String("member", m.Name))
bridgeListener, err := transport.NewUnixListener(bridgeAddr)
if err != nil {
return nil, fmt.Errorf("listen failed on bridge socket %s (%v)", bridgeAddr, err)
}
m.grpcBridge, err = newBridge(dialer{network: network, addr: grpcAddr}, bridgeListener)
if err != nil {
bridgeListener.Close()
return nil, err
}
m.grpcURL = m.clientScheme() + "://" + bridgeAddr
return m.grpcBridge, nil
}
func (m *member) Bridge() *bridge {
if !m.useBridge {
m.Logger.Panic("Bridge not available. Please configure using bridge before creating cluster.")
}
return m.grpcBridge
}
func (m *member) grpcAddr() (network, host, port string) {
// prefix with localhost so cert has right domain
host = "localhost"
if m.useIP { // for IP-only TLS certs
host = "127.0.0.1"
}
network = "unix"
if m.useTCP {
network = "tcp"
}
port = m.Name
if m.useTCP {
port = fmt.Sprintf("%d", GrpcPortNumber(m.UniqNumber, m.MemberNumber))
}
return network, host, port
}
func GrpcPortNumber(uniqNumber, memberNumber int64) int64 {
return baseGRPCPort + uniqNumber*10 + memberNumber
}
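A worked example of the port scheme above: with baseGRPCPort = 30000, the member with UniqNumber 2 and MemberNumber 1 listens on 30000 + 2*10 + 1:

port := GrpcPortNumber(2, 1) // == 30021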
type dialer struct {
network string
addr string
}
func (d dialer) Dial() (net.Conn, error) {
return net.Dial(d.network, d.addr)
}
func (m *member) ElectionTimeout() time.Duration {
return time.Duration(m.s.Cfg.ElectionTicks*int(m.s.Cfg.TickMs)) * time.Millisecond
}
func (m *member) ID() types.ID { return m.s.ID() }
func (m *member) DropConnections() { m.grpcBridge.Reset() }
func (m *member) PauseConnections() { m.grpcBridge.Pause() }
func (m *member) UnpauseConnections() { m.grpcBridge.Unpause() }
func (m *member) Blackhole() { m.grpcBridge.Blackhole() }
func (m *member) Unblackhole() { m.grpcBridge.Unblackhole() }
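A short sketch of how these bridge controls compose in a watch test, modeled on TestWatchResumeInitRev earlier in this diff (the assertions are elided):

// stall the watch stream, verify nothing arrives, then resume
clus.Members[0].Bridge().DropConnections()
clus.Members[0].Bridge().PauseConnections()
// ... assert the watch channel stays quiet while paused ...
clus.Members[0].Bridge().UnpauseConnections()
// ... the watcher should now resume from its last seen revision ...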
// NewClientV3 creates a new grpc client connection to the member
func NewClientV3(m *member) (*clientv3.Client, error) {
if m.grpcAddr == "" {
if m.grpcURL == "" {
return nil, fmt.Errorf("member not configured for grpc")
}
cfg := clientv3.Config{
Endpoints: []string{m.grpcAddr},
Endpoints: []string{m.grpcURL},
DialTimeout: 5 * time.Second,
DialOptions: []grpc.DialOption{grpc.WithBlock()},
MaxCallSendMsgSize: m.clientMaxCallSendMsgSize,
@ -831,7 +924,7 @@ func (m *member) Launch() error {
zap.String("name", m.Name),
zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()),
zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()),
zap.String("grpc-address", m.grpcAddr),
zap.String("grpc-url", m.grpcURL),
)
var err error
if m.s, err = etcdserver.NewServer(m.ServerConfig); err != nil {
@ -857,8 +950,8 @@ func (m *member) Launch() error {
return err
}
}
m.grpcServer = v3rpc.Server(m.s, tlscfg, m.grpcServerOpts...)
m.grpcServerPeer = v3rpc.Server(m.s, peerTLScfg)
m.grpcServer = v3rpc.Server(m.s, tlscfg, m.grpcServerRecorder.UnaryInterceptor(), m.grpcServerOpts...)
m.grpcServerPeer = v3rpc.Server(m.s, peerTLScfg, m.grpcServerRecorder.UnaryInterceptor())
m.serverClient = v3client.New(m.s)
lockpb.RegisterLockServer(m.grpcServer, v3lock.NewLockServer(m.serverClient))
epb.RegisterElectionServer(m.grpcServer, v3election.NewElectionServer(m.serverClient))
@ -988,11 +1081,15 @@ func (m *member) Launch() error {
zap.String("name", m.Name),
zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()),
zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()),
zap.String("grpc-address", m.grpcAddr),
zap.String("grpc-url", m.grpcURL),
)
return nil
}
func (m *member) RecordedRequests() []grpc_testing.RequestInfo {
return m.grpcServerRecorder.RecordedRequests()
}
func (m *member) WaitOK(t testutil.TB) {
m.WaitStarted(t)
for m.s.Leader() == 0 {
@ -1101,7 +1198,7 @@ func (m *member) Stop(_ testutil.TB) {
zap.String("name", m.Name),
zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()),
zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()),
zap.String("grpc-address", m.grpcAddr),
zap.String("grpc-url", m.grpcURL),
)
m.Close()
m.serverClosers = nil
@ -1110,7 +1207,7 @@ func (m *member) Stop(_ testutil.TB) {
zap.String("name", m.Name),
zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()),
zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()),
zap.String("grpc-address", m.grpcAddr),
zap.String("grpc-url", m.grpcURL),
)
}
@ -1135,7 +1232,7 @@ func (m *member) Restart(t testutil.TB) error {
zap.String("name", m.Name),
zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()),
zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()),
zap.String("grpc-address", m.grpcAddr),
zap.String("grpc-url", m.grpcURL),
)
newPeerListeners := make([]net.Listener, 0)
for _, ln := range m.PeerListeners {
@ -1160,7 +1257,7 @@ func (m *member) Restart(t testutil.TB) error {
zap.String("name", m.Name),
zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()),
zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()),
zap.String("grpc-address", m.grpcAddr),
zap.String("grpc-url", m.grpcURL),
zap.Error(err),
)
return err
@ -1173,7 +1270,7 @@ func (m *member) Terminate(t testutil.TB) {
zap.String("name", m.Name),
zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()),
zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()),
zap.String("grpc-address", m.grpcAddr),
zap.String("grpc-url", m.grpcURL),
)
m.Close()
if !m.keepDataDirTerminate {
@ -1186,7 +1283,7 @@ func (m *member) Terminate(t testutil.TB) {
zap.String("name", m.Name),
zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()),
zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()),
zap.String("grpc-address", m.grpcAddr),
zap.String("grpc-url", m.grpcURL),
)
}
@ -1282,8 +1379,9 @@ func (p SortableMemberSliceByPeerURLs) Swap(i, j int) { p[i], p[j] = p[j], p[i]
type ClusterV3 struct {
*cluster
mu sync.Mutex
clients []*clientv3.Client
mu sync.Mutex
clients []*clientv3.Client
clusterClient *clientv3.Client
}
// NewClusterV3 returns a launched cluster with a grpc client connection
@ -1329,6 +1427,11 @@ func (c *ClusterV3) Terminate(t testutil.TB) {
t.Error(err)
}
}
if c.clusterClient != nil {
if err := c.clusterClient.Close(); err != nil {
t.Error(err)
}
}
c.mu.Unlock()
c.cluster.Terminate(t)
}
@ -1341,6 +1444,25 @@ func (c *ClusterV3) Client(i int) *clientv3.Client {
return c.clients[i]
}
func (c *ClusterV3) ClusterClient() (client *clientv3.Client, err error) {
if c.clusterClient == nil {
endpoints := []string{}
for _, m := range c.Members {
endpoints = append(endpoints, m.grpcURL)
}
cfg := clientv3.Config{
Endpoints: endpoints,
DialTimeout: 5 * time.Second,
DialOptions: []grpc.DialOption{grpc.WithBlock()},
}
c.clusterClient, err = newClientV3(cfg)
if err != nil {
return nil, err
}
}
return c.clusterClient, nil
}
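A usage sketch for the shared client (the key and value are illustrative):

cc, err := clus.ClusterClient()
if err != nil {
t.Fatal(err)
}
// cc balances across every member's grpcURL and is closed once by Terminate.
if _, err := cc.Put(context.TODO(), "foo", "bar"); err != nil {
t.Fatal(err)
}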
// NewClientV3 creates a new grpc client connection to the member
func (c *ClusterV3) NewClientV3(memberIndex int) (*clientv3.Client, error) {
return NewClientV3(c.Members[memberIndex])
@ -1420,7 +1542,7 @@ func (c *ClusterV3) GetLearnerMembers() ([]*pb.Member, error) {
// AddAndLaunchLearnerMember creates a learner member, adds it to the cluster
// via v3 MemberAdd API, and then launches the new member.
func (c *ClusterV3) AddAndLaunchLearnerMember(t testutil.TB) {
m := c.mustNewMember(t)
m := c.mustNewMember(t, 0)
m.isLearner = true
scheme := schemeFromTLSInfo(c.cfg.PeerTLS)
@ -1521,7 +1643,7 @@ func (p SortableProtoMemberSliceByPeerURLs) Swap(i, j int) { p[i], p[j] = p[j],
// MustNewMember creates a new member instance based on the response of V3 Member Add API.
func (c *ClusterV3) MustNewMember(t testutil.TB, resp *clientv3.MemberAddResponse) *member {
m := c.mustNewMember(t)
m := c.mustNewMember(t, 0)
m.isLearner = resp.Member.IsLearner
m.NewCluster = false

View File

@ -173,7 +173,7 @@ func testDecreaseClusterSize(t *testing.T, size int) {
}
func TestForceNewCluster(t *testing.T) {
c := NewCluster(t, 3)
c := newCluster(t, &ClusterConfig{Size: 3, UseBridge: true})
c.Launch(t)
cc := MustNewHTTPClient(t, []string{c.Members[0].URL()}, nil)
kapi := client.NewKeysAPI(cc)
@ -283,7 +283,7 @@ func testIssue2746(t *testing.T, members int) {
func TestIssue2904(t *testing.T) {
BeforeTest(t)
// start 1-member cluster to ensure member 0 is the leader of the cluster.
c := NewCluster(t, 1)
c := newCluster(t, &ClusterConfig{Size: 1, UseBridge: true})
c.Launch(t)
defer c.Terminate(t)
@ -319,7 +319,7 @@ func TestIssue2904(t *testing.T) {
func TestIssue3699(t *testing.T) {
// start a cluster of 3 nodes a, b, c
BeforeTest(t)
c := NewCluster(t, 3)
c := newCluster(t, &ClusterConfig{Size: 3, UseBridge: true})
c.Launch(t)
defer c.Terminate(t)
@ -371,7 +371,7 @@ func TestIssue3699(t *testing.T) {
// TestRejectUnhealthyAdd ensures an unhealthy cluster rejects adding members.
func TestRejectUnhealthyAdd(t *testing.T) {
BeforeTest(t)
c := NewCluster(t, 3)
c := newCluster(t, &ClusterConfig{Size: 3, UseBridge: true})
for _, m := range c.Members {
m.ServerConfig.StrictReconfigCheck = true
}
@ -415,7 +415,7 @@ func TestRejectUnhealthyAdd(t *testing.T) {
// if quorum will be lost.
func TestRejectUnhealthyRemove(t *testing.T) {
BeforeTest(t)
c := NewCluster(t, 5)
c := newCluster(t, &ClusterConfig{Size: 5, UseBridge: true})
for _, m := range c.Members {
m.ServerConfig.StrictReconfigCheck = true
}
@ -464,7 +464,7 @@ func TestRestartRemoved(t *testing.T) {
BeforeTest(t)
// 1. start single-member cluster
c := NewCluster(t, 1)
c := newCluster(t, &ClusterConfig{Size: 1, UseBridge: true})
for _, m := range c.Members {
m.ServerConfig.StrictReconfigCheck = true
}
@ -540,7 +540,7 @@ func clusterMustProgress(t *testing.T, membs []*member) {
func TestSpeedyTerminate(t *testing.T) {
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
clus := NewClusterV3(t, &ClusterConfig{Size: 3, UseBridge: true})
// Stop/Restart so requests will time out on lost leaders
for i := 0; i < 3; i++ {
clus.Members[i].Stop(t)

View File

@ -0,0 +1,197 @@
// Copyright 2021 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package integration
import (
"context"
tls "crypto/tls"
"fmt"
"strings"
"testing"
"time"
clientv3 "go.etcd.io/etcd/client/v3"
"google.golang.org/grpc"
)
func TestAuthority(t *testing.T) {
tcs := []struct {
name string
useTCP bool
useTLS bool
// Pattern used to generate endpoints for the client. Fields filled:
// %d - will be filled with member grpc port
// %s - will be filled with member name
clientURLPattern string
// Pattern used to validate authority received by server. Fields filled:
// %d - will be filled with first member grpc port
// %s - will be filled with first member name
expectAuthorityPattern string
}{
{
name: "unix:path",
clientURLPattern: "unix:localhost:%s",
expectAuthorityPattern: "localhost:%s",
},
{
name: "unix://absolute_path",
clientURLPattern: "unix://localhost:%s",
expectAuthorityPattern: "localhost:%s",
},
// "unixs" is not standard schema supported by etcd
{
name: "unixs:absolute_path",
useTLS: true,
clientURLPattern: "unixs:localhost:%s",
expectAuthorityPattern: "localhost:%s",
},
{
name: "unixs://absolute_path",
useTLS: true,
clientURLPattern: "unixs://localhost:%s",
expectAuthorityPattern: "localhost:%s",
},
{
name: "http://domain[:port]",
useTCP: true,
clientURLPattern: "http://localhost:%d",
expectAuthorityPattern: "localhost:%d",
},
{
name: "https://domain[:port]",
useTLS: true,
useTCP: true,
clientURLPattern: "https://localhost:%d",
expectAuthorityPattern: "localhost:%d",
},
{
name: "http://address[:port]",
useTCP: true,
clientURLPattern: "http://127.0.0.1:%d",
expectAuthorityPattern: "127.0.0.1:%d",
},
{
name: "https://address[:port]",
useTCP: true,
useTLS: true,
clientURLPattern: "https://127.0.0.1:%d",
expectAuthorityPattern: "127.0.0.1:%d",
},
}
for _, tc := range tcs {
for _, clusterSize := range []int{1, 3} {
t.Run(fmt.Sprintf("Size: %d, Scenario: %q", clusterSize, tc.name), func(t *testing.T) {
BeforeTest(t)
cfg := ClusterConfig{
Size: clusterSize,
UseTCP: tc.useTCP,
UseIP: tc.useTCP,
}
cfg, tlsConfig := setupTLS(t, tc.useTLS, cfg)
clus := NewClusterV3(t, &cfg)
defer clus.Terminate(t)
kv := setupClient(t, tc.clientURLPattern, clus, tlsConfig)
defer kv.Close()
_, err := kv.Put(context.TODO(), "foo", "bar")
if err != nil {
t.Fatal(err)
}
assertAuthority(t, templateAuthority(t, tc.expectAuthorityPattern, clus.Members[0]), clus)
})
}
}
}
func setupTLS(t *testing.T, useTLS bool, cfg ClusterConfig) (ClusterConfig, *tls.Config) {
t.Helper()
if useTLS {
cfg.ClientTLS = &testTLSInfo
tlsConfig, err := testTLSInfo.ClientConfig()
if err != nil {
t.Fatal(err)
}
return cfg, tlsConfig
}
return cfg, nil
}
func setupClient(t *testing.T, endpointPattern string, clus *ClusterV3, tlsConfig *tls.Config) *clientv3.Client {
t.Helper()
endpoints := templateEndpoints(t, endpointPattern, clus)
kv, err := clientv3.New(clientv3.Config{
Endpoints: endpoints,
DialTimeout: 5 * time.Second,
DialOptions: []grpc.DialOption{grpc.WithBlock()},
TLS: tlsConfig,
})
if err != nil {
t.Fatal(err)
}
return kv
}
func templateEndpoints(t *testing.T, pattern string, clus *ClusterV3) []string {
t.Helper()
endpoints := []string{}
for _, m := range clus.Members {
ent := pattern
if strings.Contains(ent, "%d") {
ent = fmt.Sprintf(ent, GrpcPortNumber(m.UniqNumber, m.MemberNumber))
}
if strings.Contains(ent, "%s") {
ent = fmt.Sprintf(ent, m.Name)
}
if strings.Contains(ent, "%") {
t.Fatalf("Failed to template pattern, %% symbol left %q", ent)
}
endpoints = append(endpoints, ent)
}
return endpoints
}
func templateAuthority(t *testing.T, pattern string, m *member) string {
t.Helper()
authority := pattern
if strings.Contains(authority, "%d") {
authority = fmt.Sprintf(authority, GrpcPortNumber(m.UniqNumber, m.MemberNumber))
}
if strings.Contains(authority, "%s") {
authority = fmt.Sprintf(authority, m.Name)
}
if strings.Contains(authority, "%") {
t.Fatalf("Failed to template pattern, %% symbol left %q", authority)
}
return authority
}
func assertAuthority(t *testing.T, expectedAuthority string, clus *ClusterV3) {
t.Helper()
requestsFound := 0
for _, m := range clus.Members {
for _, r := range m.RecordedRequests() {
requestsFound++
if r.Authority != expectedAuthority {
t.Errorf("Got unexpected authority header, member: %q, request: %q, got authority: %q, expected %q", m.Name, r.FullMethod, r.Authority, expectedAuthority)
}
}
}
if requestsFound == 0 {
t.Errorf("Expected at least one request")
}
}
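To make the templating concrete (the port number is illustrative): for the "http://domain[:port]" case, templateEndpoints expands "http://localhost:%d" once per member, and assertAuthority then expects every recorded request, on every member, to carry the authority derived from the first member:

eps := templateEndpoints(t, "http://localhost:%d", clus)
// e.g. eps[0] == "http://localhost:30011"; every recorded request is then
// expected to carry the authority "localhost:30011" (member 0's host:port)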

View File

@ -46,7 +46,7 @@ func TestPauseMember(t *testing.T) {
func TestRestartMember(t *testing.T) {
BeforeTest(t)
c := NewCluster(t, 3)
c := newCluster(t, &ClusterConfig{Size: 3, UseBridge: true})
c.Launch(t)
defer c.Terminate(t)
@ -88,7 +88,7 @@ func TestLaunchDuplicateMemberShouldFail(t *testing.T) {
func TestSnapshotAndRestartMember(t *testing.T) {
BeforeTest(t)
m := mustNewMember(t, memberConfig{name: "snapAndRestartTest"})
m := mustNewMember(t, memberConfig{name: "snapAndRestartTest", useBridge: true})
m.SnapshotCount = 100
m.Launch()
defer m.Terminate(t)

View File

@ -36,7 +36,7 @@ func TestClusterProxyMemberList(t *testing.T) {
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
cts := newClusterProxyServer(zaptest.NewLogger(t), []string{clus.Members[0].GRPCAddr()}, t)
cts := newClusterProxyServer(zaptest.NewLogger(t), []string{clus.Members[0].GRPCURL()}, t)
defer cts.close(t)
cfg := clientv3.Config{

View File

@ -34,7 +34,7 @@ func TestKVProxyRange(t *testing.T) {
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
kvts := newKVProxyServer([]string{clus.Members[0].GRPCAddr()}, t)
kvts := newKVProxyServer([]string{clus.Members[0].GRPCURL()}, t)
defer kvts.close()
// create a client and try to get key from proxy.

View File

@ -31,7 +31,7 @@ func TestRegister(t *testing.T) {
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
cli := clus.Client(0)
paddr := clus.Members[0].GRPCAddr()
paddr := clus.Members[0].GRPCURL()
testPrefix := "test-name"
wa := mustCreateWatcher(t, cli, testPrefix)

View File

@ -35,7 +35,7 @@ func TestV3StorageQuotaApply(t *testing.T) {
BeforeTest(t)
quotasize := int64(16 * os.Getpagesize())
clus := NewClusterV3(t, &ClusterConfig{Size: 2})
clus := NewClusterV3(t, &ClusterConfig{Size: 2, UseBridge: true})
defer clus.Terminate(t)
kvc0 := toGRPC(clus.Client(0)).KV
kvc1 := toGRPC(clus.Client(1)).KV
@ -147,7 +147,7 @@ func TestV3AlarmDeactivate(t *testing.T) {
func TestV3CorruptAlarm(t *testing.T) {
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
clus := NewClusterV3(t, &ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)
var wg sync.WaitGroup

View File

@ -61,7 +61,7 @@ func TestV3MaintenanceDefragmentInflightRange(t *testing.T) {
// See https://github.com/etcd-io/etcd/issues/7322 for more detail.
func TestV3KVInflightRangeRequests(t *testing.T) {
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
clus := NewClusterV3(t, &ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)
cli := clus.RandClient()

View File

@ -22,6 +22,7 @@ import (
"math/rand"
"os"
"reflect"
"strings"
"testing"
"time"
@ -88,7 +89,7 @@ func TestV3PutOverwrite(t *testing.T) {
// TestV3PutRestart checks that a put succeeds after an unrelated member restart
func TestV3PutRestart(t *testing.T) {
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
clus := NewClusterV3(t, &ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)
kvIdx := rand.Intn(3)
@ -1210,7 +1211,7 @@ func TestV3Hash(t *testing.T) {
// TestV3HashRestart ensures that hash stays the same after restart.
func TestV3HashRestart(t *testing.T) {
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
clus := NewClusterV3(t, &ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)
cli := clus.RandClient()
@ -1243,7 +1244,7 @@ func TestV3StorageQuotaAPI(t *testing.T) {
BeforeTest(t)
quotasize := int64(16 * os.Getpagesize())
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
clus := NewClusterV3(t, &ClusterConfig{Size: 3, UseBridge: true})
// Set a quota on one node
clus.Members[0].QuotaBackendBytes = quotasize
@ -1601,8 +1602,10 @@ func TestTLSGRPCRejectSecureClient(t *testing.T) {
clus.Members[0].ClientTLSInfo = &testTLSInfo
clus.Members[0].DialOptions = []grpc.DialOption{grpc.WithBlock()}
clus.Members[0].grpcURL = strings.Replace(clus.Members[0].grpcURL, "http://", "https://", 1)
client, err := NewClientV3(clus.Members[0])
if client != nil || err == nil {
client.Close()
t.Fatalf("expected no client")
} else if err != context.DeadlineExceeded {
t.Fatalf("unexpected error (%v)", err)
@ -1784,7 +1787,7 @@ func testTLSReload(
}
cli, cerr := NewClient(t, clientv3.Config{
DialOptions: []grpc.DialOption{grpc.WithBlock()},
Endpoints: []string{clus.Members[0].GRPCAddr()},
Endpoints: []string{clus.Members[0].GRPCURL()},
DialTimeout: time.Second,
TLS: cc,
})
@ -1818,7 +1821,7 @@ func testTLSReload(
t.Fatal(terr)
}
cl, cerr := NewClient(t, clientv3.Config{
Endpoints: []string{clus.Members[0].GRPCAddr()},
Endpoints: []string{clus.Members[0].GRPCURL()},
DialTimeout: 5 * time.Second,
TLS: tls,
})
@ -1858,7 +1861,7 @@ func TestGRPCRequireLeader(t *testing.T) {
func TestGRPCStreamRequireLeader(t *testing.T) {
BeforeTest(t)
cfg := ClusterConfig{Size: 3}
cfg := ClusterConfig{Size: 3, UseBridge: true}
clus := newClusterV3NoClients(t, &cfg)
defer clus.Terminate(t)

View File

@ -36,7 +36,7 @@ import (
func TestV3LeasePromote(t *testing.T) {
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
clus := NewClusterV3(t, &ClusterConfig{Size: 3, UseBridge: true})
defer clus.Terminate(t)
// create lease
@ -237,6 +237,7 @@ func TestV3LeaseCheckpoint(t *testing.T) {
Size: 3,
EnableLeaseCheckpoint: true,
LeaseCheckpointInterval: leaseInterval,
UseBridge: true,
})
defer clus.Terminate(t)
@ -649,7 +650,7 @@ const fiveMinTTL int64 = 300
func TestV3LeaseRecoverAndRevoke(t *testing.T) {
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
clus := NewClusterV3(t, &ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)
kvc := toGRPC(clus.Client(0)).KV
@ -700,7 +701,7 @@ func TestV3LeaseRecoverAndRevoke(t *testing.T) {
func TestV3LeaseRevokeAndRecover(t *testing.T) {
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
clus := NewClusterV3(t, &ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)
kvc := toGRPC(clus.Client(0)).KV
@ -752,7 +753,7 @@ func TestV3LeaseRevokeAndRecover(t *testing.T) {
func TestV3LeaseRecoverKeyWithDetachedLease(t *testing.T) {
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
clus := NewClusterV3(t, &ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)
kvc := toGRPC(clus.Client(0)).KV
@ -808,7 +809,7 @@ func TestV3LeaseRecoverKeyWithDetachedLease(t *testing.T) {
func TestV3LeaseRecoverKeyWithMutipleLease(t *testing.T) {
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
clus := NewClusterV3(t, &ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)
kvc := toGRPC(clus.Client(0)).KV

View File

@ -55,7 +55,7 @@ func testTLSCipherSuites(t *testing.T, valid bool) {
t.Fatal(err)
}
cli, cerr := NewClient(t, clientv3.Config{
Endpoints: []string{clus.Members[0].GRPCAddr()},
Endpoints: []string{clus.Members[0].GRPCURL()},
DialTimeout: time.Second,
DialOptions: []grpc.DialOption{grpc.WithBlock()},
TLS: cc,

View File

@ -1034,7 +1034,7 @@ func TestWatchWithProgressNotify(t *testing.T) {
// TestV3WatchClose opens many watchers concurrently on multiple streams.
func TestV3WatchClose(t *testing.T) {
BeforeTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
clus := NewClusterV3(t, &ClusterConfig{Size: 1, UseBridge: true})
defer clus.Terminate(t)
c := clus.Client(0)
@ -1062,7 +1062,7 @@ func TestV3WatchClose(t *testing.T) {
}()
}
clus.Members[0].DropConnections()
clus.Members[0].Bridge().DropConnections()
wg.Wait()
}