Compare commits

1 Commit

Author SHA1 Message Date
Vitaliy Filippov 416381529b etcdserver: Fix 64 KB websocket notification message limit
This fixes etcd being unable to send any message longer than 64 KB as
a notification over the websocket. This was because an older version
of grpc-websocket-proxy was used and the WithMaxRespBodyBufferSize
option wasn't set.
2020-10-19 15:20:44 +03:00
1339 changed files with 468613 additions and 8725 deletions
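For context, the fix boils down to passing a larger response-body buffer size to grpc-websocket-proxy where it wraps etcd's gRPC-gateway handler. A minimal sketch of the idea, assuming the github.com/tmc/grpc-websocket-proxy/wsproxy package; the helper and handler names below are illustrative, not the literal patch:

package main

import (
	"net/http"

	"github.com/tmc/grpc-websocket-proxy/wsproxy"
)

// wrapWithWebsocketProxy wraps a gRPC-gateway handler so that websocket
// clients can receive streamed responses (e.g. watch notifications).
func wrapWithWebsocketProxy(gatewayMux http.Handler) http.Handler {
	return wsproxy.WebsocketProxy(
		gatewayMux,
		// Without this option the proxy reads each response line through a
		// bufio.Scanner with its default 64 KB buffer, so any single
		// notification larger than that cannot be forwarded.
		wsproxy.WithMaxRespBodyBufferSize(0x7fffffff),
	)
}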

@ -1,32 +0,0 @@
name: Release
on: [push, pull_request]
jobs:
release:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- id: goversion
run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT"
- uses: actions/setup-go@v2
with:
go-version: ${{ steps.goversion.outputs.goversion }}
- name: release
run: |
set -euo pipefail
git config --global user.email "github-action@etcd.io"
git config --global user.name "Github Action"
gpg --batch --gen-key <<EOF
%no-protection
Key-Type: 1
Key-Length: 2048
Subkey-Type: 1
Subkey-Length: 2048
Name-Real: Github Action
Name-Email: github-action@etcd.io
Expire-Date: 0
EOF
DRY_RUN=true ./scripts/release.sh --no-upload --no-docker-push --in-place 3.4.99
- name: test-image
run: |
VERSION=3.4.99 ./scripts/test_images.sh

@ -1,78 +0,0 @@
name: Tests
on: [push, pull_request]
jobs:
test:
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
target:
- linux-amd64-fmt
- linux-amd64-integration-1-cpu
- linux-amd64-integration-2-cpu
- linux-amd64-integration-4-cpu
- linux-amd64-functional
- linux-amd64-unit-4-cpu-race
- all-build
- linux-amd64-grpcproxy
- linux-amd64-e2e
- linux-386-unit
steps:
- uses: actions/checkout@v2
- id: goversion
run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT"
- uses: actions/setup-go@v2
with:
go-version: ${{ steps.goversion.outputs.goversion }}
- run: date
- env:
TARGET: ${{ matrix.target }}
run: |
set -euo pipefail
go version
echo ${GOROOT}
echo "${TARGET}"
case "${TARGET}" in
linux-amd64-fmt)
GOARCH=amd64 PASSES='fmt bom dep' ./test
;;
linux-amd64-integration-1-cpu)
GOARCH=amd64 CPU=1 PASSES='integration' RACE='false' ./test
;;
linux-amd64-integration-2-cpu)
GOARCH=amd64 CPU=2 PASSES='integration' RACE='false' ./test
;;
linux-amd64-integration-4-cpu)
GOARCH=amd64 CPU=4 PASSES='integration' RACE='false' ./test
;;
linux-amd64-functional)
./build && GOARCH=amd64 PASSES='functional' ./test
;;
linux-amd64-unit-4-cpu-race)
GOARCH=amd64 PASSES='unit' RACE='true' CPU='4' ./test -p=2
;;
all-build)
GOARCH=amd64 PASSES='build' ./test
GOARCH=386 PASSES='build' ./test
GO_BUILD_FLAGS='-v' GOOS=darwin GOARCH=amd64 ./build
GO_BUILD_FLAGS='-v' GOOS=windows GOARCH=amd64 ./build
GO_BUILD_FLAGS='-v' GOARCH=arm ./build
GO_BUILD_FLAGS='-v' GOARCH=arm64 ./build
GO_BUILD_FLAGS='-v' GOARCH=ppc64le ./build
GO_BUILD_FLAGS='-v' GOARCH=s390x ./build
;;
linux-amd64-grpcproxy)
PASSES='build grpcproxy' CPU='4' RACE='true' ./test
;;
linux-amd64-e2e)
GOARCH=amd64 PASSES='build release e2e' ./test
;;
linux-386-unit)
GOARCH=386 PASSES='unit' ./test
;;
*)
echo "Failed to find target"
exit 1
;;
esac

@ -1,37 +0,0 @@
name: Trivy Nightly Scan
on:
schedule:
- cron: '0 2 * * *' # run at 2 AM UTC
permissions: read-all
jobs:
nightly-scan:
name: Trivy Scan nightly
strategy:
fail-fast: false
matrix:
# maintain the versions of etcd that need to be actively
# security scanned
versions: [v3.4.22]
permissions:
security-events: write # for github/codeql-action/upload-sarif to upload SARIF results
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # v3.1.0
with:
ref: release-3.4
- name: Run Trivy vulnerability scanner
uses: aquasecurity/trivy-action@9ab158e8597f3b310480b9a69402b419bc03dbd5 # master
with:
image-ref: 'gcr.io/etcd-development/etcd:${{ matrix.versions }}'
severity: 'CRITICAL,HIGH'
format: 'template'
template: '@/contrib/sarif.tpl'
output: 'trivy-results-3-4.sarif'
- name: Upload Trivy scan results to GitHub Security tab
uses: github/codeql-action/upload-sarif@a669cc5936cc5e1b6a362ec1ff9e410dc570d190 # v2.1.36
with:
sarif_file: 'trivy-results-3-4.sarif'

.gitignore

@ -31,7 +31,6 @@ vendor/**/*
!vendor/**/License*
!vendor/**/LICENCE*
!vendor/**/LICENSE*
!vendor/modules.txt
vendor/**/*_test.go
*.bak

@ -1 +0,0 @@
1.19.10

.travis.yml

@ -0,0 +1,94 @@
language: go
go_import_path: go.etcd.io/etcd
sudo: required
services: docker
go:
- 1.12.12
notifications:
on_success: never
on_failure: never
env:
matrix:
- TARGET=linux-amd64-fmt
- TARGET=linux-amd64-integration-1-cpu
- TARGET=linux-amd64-integration-2-cpu
- TARGET=linux-amd64-integration-4-cpu
- TARGET=linux-amd64-functional
- TARGET=linux-amd64-unit
- TARGET=all-build
- TARGET=linux-amd64-grpcproxy
- TARGET=linux-386-unit
matrix:
fast_finish: true
allow_failures:
- go: 1.12.12
env: TARGET=linux-amd64-grpcproxy
- go: 1.12.12
env: TARGET=linux-386-unit
before_install:
- if [[ $TRAVIS_GO_VERSION == 1.* ]]; then docker pull gcr.io/etcd-development/etcd-test:go${TRAVIS_GO_VERSION}; fi
install:
- go get -t -v -d ./...
script:
- echo "TRAVIS_GO_VERSION=${TRAVIS_GO_VERSION}"
- >
case "${TARGET}" in
linux-amd64-fmt)
docker run --rm \
--volume=`pwd`:/go/src/go.etcd.io/etcd gcr.io/etcd-development/etcd-test:go${TRAVIS_GO_VERSION} \
/bin/bash -c "GOARCH=amd64 PASSES='fmt bom dep' ./test"
;;
linux-amd64-integration-1-cpu)
docker run --rm \
--volume=`pwd`:/go/src/go.etcd.io/etcd gcr.io/etcd-development/etcd-test:go${TRAVIS_GO_VERSION} \
/bin/bash -c "GOARCH=amd64 CPU=1 PASSES='integration' ./test"
;;
linux-amd64-integration-2-cpu)
docker run --rm \
--volume=`pwd`:/go/src/go.etcd.io/etcd gcr.io/etcd-development/etcd-test:go${TRAVIS_GO_VERSION} \
/bin/bash -c "GOARCH=amd64 CPU=2 PASSES='integration' ./test"
;;
linux-amd64-integration-4-cpu)
docker run --rm \
--volume=`pwd`:/go/src/go.etcd.io/etcd gcr.io/etcd-development/etcd-test:go${TRAVIS_GO_VERSION} \
/bin/bash -c "GOARCH=amd64 CPU=4 PASSES='integration' ./test"
;;
linux-amd64-functional)
docker run --rm \
--volume=`pwd`:/go/src/go.etcd.io/etcd gcr.io/etcd-development/etcd-test:go${TRAVIS_GO_VERSION} \
/bin/bash -c "./build && GOARCH=amd64 PASSES='functional' ./test"
;;
linux-amd64-unit)
docker run --rm \
--volume=`pwd`:/go/src/go.etcd.io/etcd gcr.io/etcd-development/etcd-test:go${TRAVIS_GO_VERSION} \
/bin/bash -c "GOARCH=amd64 PASSES='unit' ./test"
;;
all-build)
docker run --rm \
--volume=`pwd`:/go/src/go.etcd.io/etcd gcr.io/etcd-development/etcd-test:go${TRAVIS_GO_VERSION} \
/bin/bash -c "GOARCH=amd64 PASSES='build' ./test \
&& GOARCH=386 PASSES='build' ./test \
&& GO_BUILD_FLAGS='-v' GOOS=darwin GOARCH=amd64 ./build \
&& GO_BUILD_FLAGS='-v' GOOS=windows GOARCH=amd64 ./build \
&& GO_BUILD_FLAGS='-v' GOARCH=arm ./build \
&& GO_BUILD_FLAGS='-v' GOARCH=arm64 ./build \
&& GO_BUILD_FLAGS='-v' GOARCH=ppc64le ./build"
;;
linux-amd64-grpcproxy)
sudo HOST_TMP_DIR=/tmp TEST_OPTS="PASSES='build grpcproxy'" make docker-test
;;
linux-386-unit)
docker run --rm \
--volume=`pwd`:/go/src/go.etcd.io/etcd gcr.io/etcd-development/etcd-test:go${TRAVIS_GO_VERSION} \
/bin/bash -c "GOARCH=386 PASSES='unit' ./test"
;;
esac

@ -1,10 +1,15 @@
FROM --platform=linux/amd64 gcr.io/distroless/static-debian11
FROM k8s.gcr.io/debian-base:v1.0.0
ADD etcd /usr/local/bin/
ADD etcdctl /usr/local/bin/
RUN mkdir -p /var/etcd/
RUN mkdir -p /var/lib/etcd/
WORKDIR /var/etcd/
WORKDIR /var/lib/etcd/
# Alpine Linux doesn't use pam, which means that there is no /etc/nsswitch.conf,
# but Golang relies on /etc/nsswitch.conf to check the order of DNS resolving
# (see https://github.com/golang/go/commit/9dee7771f561cf6aee081c0af6658cc81fac3918)
# To fix this we just create /etc/nsswitch.conf and add the following line:
RUN echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf
EXPOSE 2379 2380

@ -1,10 +1,9 @@
FROM --platform=linux/arm64 gcr.io/distroless/static-debian11
FROM k8s.gcr.io/debian-base-arm64:v1.0.0
ADD etcd /usr/local/bin/
ADD etcdctl /usr/local/bin/
WORKDIR /var/etcd/
WORKDIR /var/lib/etcd/
ADD var/etcd /var/etcd
ADD var/lib/etcd /var/lib/etcd
EXPOSE 2379 2380

@ -1,10 +1,9 @@
FROM --platform=linux/ppc64le gcr.io/distroless/static-debian11
FROM k8s.gcr.io/debian-base-ppc64le:v1.0.0
ADD etcd /usr/local/bin/
ADD etcdctl /usr/local/bin/
WORKDIR /var/etcd/
WORKDIR /var/lib/etcd/
ADD var/etcd /var/etcd
ADD var/lib/etcd /var/lib/etcd
EXPOSE 2379 2380

@ -128,7 +128,7 @@ for TARGET_ARCH in "amd64" "arm64" "ppc64le"; do
TAG=quay.io/coreos/etcd GOARCH=${TARGET_ARCH} \
BINARYDIR=release/etcd-${VERSION}-linux-${TARGET_ARCH} \
BUILDDIR=release \
./scripts/build-docker.sh ${VERSION}
./scripts/build-docker ${VERSION}
done
```

@ -51,7 +51,7 @@ docker-remove:
GO_VERSION ?= 1.19.9
GO_VERSION ?= 1.12.12
ETCD_VERSION ?= $(shell git rev-parse --short HEAD || echo "GitNotFound")
TEST_SUFFIX = $(shell date +%s | base64 | head -c 15)
@ -65,11 +65,11 @@ endif
# Example:
# GO_VERSION=1.12.17 make build-docker-test
# GO_VERSION=1.12.12 make build-docker-test
# make build-docker-test
#
# gcloud docker -- login -u _json_key -p "$(cat /etc/gcp-key-etcd-development.json)" https://gcr.io
# GO_VERSION=1.12.17 make push-docker-test
# GO_VERSION=1.12.12 make push-docker-test
# make push-docker-test
#
# gsutil -m acl ch -u allUsers:R -r gs://artifacts.etcd-development.appspot.com

@ -2,14 +2,12 @@
// source: auth.proto
/*
Package authpb is a generated protocol buffer package.
It is generated from these files:
Package authpb is a generated protocol buffer package.
It is generated from these files:
auth.proto
It has these top-level messages:
It has these top-level messages:
UserAddOptions
User
Permission

@ -21,7 +21,7 @@ import (
"errors"
"time"
"github.com/golang-jwt/jwt"
jwt "github.com/dgrijalva/jwt-go"
"go.uber.org/zap"
)
@ -42,7 +42,7 @@ func (t *tokenJWT) info(ctx context.Context, token string, rev uint64) (*AuthInf
// rev isn't used in JWT; it is only used in simple token
var (
username string
revision float64
revision uint64
)
parsed, err := jwt.Parse(token, func(token *jwt.Token) (interface{}, error) {
@ -82,19 +82,10 @@ func (t *tokenJWT) info(ctx context.Context, token string, rev uint64) (*AuthInf
return nil, false
}
username, ok = claims["username"].(string)
if !ok {
t.lg.Warn("failed to obtain user claims from jwt token")
return nil, false
}
username = claims["username"].(string)
revision = uint64(claims["revision"].(float64))
revision, ok = claims["revision"].(float64)
if !ok {
t.lg.Warn("failed to obtain revision claims from jwt token")
return nil, false
}
return &AuthInfo{Username: username, Revision: uint64(revision)}, true
return &AuthInfo{Username: username, Revision: revision}, true
}
func (t *tokenJWT) assign(ctx context.Context, username string, revision uint64) (string, error) {

@ -18,10 +18,7 @@ import (
"context"
"fmt"
"testing"
"time"
"github.com/golang-jwt/jwt"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
)
@ -205,75 +202,3 @@ func TestJWTBad(t *testing.T) {
func testJWTOpts() string {
return fmt.Sprintf("%s,pub-key=%s,priv-key=%s,sign-method=RS256", tokenTypeJWT, jwtRSAPubKey, jwtRSAPrivKey)
}
func TestJWTTokenWithMissingFields(t *testing.T) {
testCases := []struct {
name string
username string // An empty string means not present
revision uint64 // 0 means not present
expectValid bool
}{
{
name: "valid token",
username: "hello",
revision: 100,
expectValid: true,
},
{
name: "no username",
username: "",
revision: 100,
expectValid: false,
},
{
name: "no revision",
username: "hello",
revision: 0,
expectValid: false,
},
}
for _, tc := range testCases {
tc := tc
optsMap := map[string]string{
"priv-key": jwtRSAPrivKey,
"sign-method": "RS256",
"ttl": "1h",
}
t.Run(tc.name, func(t *testing.T) {
// prepare claims
claims := jwt.MapClaims{
"exp": time.Now().Add(time.Hour).Unix(),
}
if tc.username != "" {
claims["username"] = tc.username
}
if tc.revision != 0 {
claims["revision"] = tc.revision
}
// generate a JWT token with the given claims
var opts jwtOptions
err := opts.ParseWithDefaults(optsMap)
require.NoError(t, err)
key, err := opts.Key()
require.NoError(t, err)
tk := jwt.NewWithClaims(opts.SignMethod, claims)
token, err := tk.SignedString(key)
require.NoError(t, err)
// verify the token
jwtProvider, err := newTokenProviderJWT(zap.NewNop(), optsMap)
require.NoError(t, err)
ai, ok := jwtProvider.info(context.TODO(), token, 123)
require.Equal(t, tc.expectValid, ok)
if ok {
require.Equal(t, tc.username, ai.Username)
require.Equal(t, tc.revision, ai.Revision)
}
})
}
}

@ -21,7 +21,7 @@ import (
"io/ioutil"
"time"
"github.com/golang-jwt/jwt"
jwt "github.com/dgrijalva/jwt-go"
)
const (

@ -76,10 +76,8 @@ func checkKeyInterval(
cachedPerms *unifiedRangePermissions,
key, rangeEnd []byte,
permtyp authpb.Permission_Type) bool {
if isOpenEnded(rangeEnd) {
if len(rangeEnd) == 1 && rangeEnd[0] == 0 {
rangeEnd = nil
// nil rangeEnd will be converted to []byte{}, the largest element of BytesAffineComparable,
// in NewBytesAffineInterval().
}
ivl := adt.NewBytesAffineInterval(key, rangeEnd)
@ -115,99 +113,41 @@ func checkKeyPoint(lg *zap.Logger, cachedPerms *unifiedRangePermissions, key []b
return false
}
func (as *authStore) isRangeOpPermitted(userName string, key, rangeEnd []byte, permtyp authpb.Permission_Type) bool {
as.rangePermCacheMu.RLock()
defer as.rangePermCacheMu.RUnlock()
rangePerm, ok := as.rangePermCache[userName]
func (as *authStore) isRangeOpPermitted(tx backend.BatchTx, userName string, key, rangeEnd []byte, permtyp authpb.Permission_Type) bool {
// assumption: tx is Lock()ed
_, ok := as.rangePermCache[userName]
if !ok {
as.lg.Error(
"user doesn't exist",
zap.String("user-name", userName),
)
return false
}
if len(rangeEnd) == 0 {
return checkKeyPoint(as.lg, rangePerm, key, permtyp)
}
return checkKeyInterval(as.lg, rangePerm, key, rangeEnd, permtyp)
}
func (as *authStore) refreshRangePermCache(tx backend.BatchTx) {
// Note that every authentication configuration update calls this method and it invalidates the entire
// rangePermCache and reconstructs it based on the information of users and roles stored in the backend.
// This can be a costly operation.
as.rangePermCacheMu.Lock()
defer as.rangePermCacheMu.Unlock()
as.rangePermCache = make(map[string]*unifiedRangePermissions)
users := getAllUsers(as.lg, tx)
for _, user := range users {
userName := string(user.Name)
perms := getMergedPerms(as.lg, tx, userName)
if perms == nil {
as.lg.Error(
if as.lg != nil {
as.lg.Warn(
"failed to create a merged permission",
zap.String("user-name", userName),
)
continue
} else {
plog.Errorf("failed to create a unified permission of user %s", userName)
}
return false
}
as.rangePermCache[userName] = perms
}
if len(rangeEnd) == 0 {
return checkKeyPoint(as.lg, as.rangePermCache[userName], key, permtyp)
}
return checkKeyInterval(as.lg, as.rangePermCache[userName], key, rangeEnd, permtyp)
}
func (as *authStore) clearCachedPerm() {
as.rangePermCache = make(map[string]*unifiedRangePermissions)
}
func (as *authStore) invalidateCachedPerm(userName string) {
delete(as.rangePermCache, userName)
}
type unifiedRangePermissions struct {
readPerms adt.IntervalTree
writePerms adt.IntervalTree
}
// Constraints related to key range
// Assumptions:
// a1. key must be non-nil
// a2. []byte{} (in the case of string, "") is not a valid key of etcd
// For representing an open-ended range, BytesAffineComparable uses []byte{} as the largest element.
// a3. []byte{0x00} is the minimum valid etcd key
//
// Based on the above assumptions, key and rangeEnd must follow below rules:
// b1. for representing a single key point, rangeEnd should be nil or zero length byte array (in the case of string, "")
// Rule a2 guarantees that (X, []byte{}) for any X is not a valid range. So such ranges can be used for representing
// a single key permission.
//
// b2. key range with upper limit, like (X, Y), larger or equal to X and smaller than Y
//
// b3. key range with open-ended, like (X, <open ended>), is represented like (X, []byte{0x00})
// Because of rule a3, a literal range (X, []byte{0x00}) would be empty, so it makes no sense to have
// such a permission. So we use []byte{0x00} for representing an open-ended permission.
// Note that rangeEnd with []byte{0x00} will be converted into []byte{} before inserted into the interval tree
// (rule a2 ensures that this is the largest element).
// Special range like key = []byte{0x00} and rangeEnd = []byte{0x00} is treated as a range which matches with all keys.
//
// Treating a range whose rangeEnd is []byte{0x00} as open-ended follows the rules of the Range() and Watch() APIs.
func isOpenEnded(rangeEnd []byte) bool { // check rule b3
return len(rangeEnd) == 1 && rangeEnd[0] == 0
}
func isValidPermissionRange(key, rangeEnd []byte) bool {
if len(key) == 0 {
return false
}
if rangeEnd == nil || len(rangeEnd) == 0 { // ensure rule b1
return true
}
begin := adt.BytesAffineComparable(key)
end := adt.BytesAffineComparable(rangeEnd)
if begin.Compare(end) == -1 { // rule b2
return true
}
if isOpenEnded(rangeEnd) {
return true
}
return false
}

@ -45,26 +45,6 @@ func TestRangePermission(t *testing.T) {
[]byte("a"), []byte("f"),
true,
},
{
[]adt.Interval{adt.NewBytesAffineInterval([]byte("a"), []byte("d")), adt.NewBytesAffineInterval([]byte("a"), []byte("b")), adt.NewBytesAffineInterval([]byte("c"), []byte("f"))},
[]byte("a"), []byte{},
false,
},
{
[]adt.Interval{adt.NewBytesAffineInterval([]byte("a"), []byte{})},
[]byte("a"), []byte{},
true,
},
{
[]adt.Interval{adt.NewBytesAffineInterval([]byte{0x00}, []byte{})},
[]byte("a"), []byte{},
true,
},
{
[]adt.Interval{adt.NewBytesAffineInterval([]byte{0x00}, []byte{})},
[]byte{0x00}, []byte{},
true,
},
}
for i, tt := range tests {
@ -106,16 +86,6 @@ func TestKeyPermission(t *testing.T) {
[]byte("f"),
false,
},
{
[]adt.Interval{adt.NewBytesAffineInterval([]byte("a"), []byte("d")), adt.NewBytesAffineInterval([]byte("a"), []byte("b")), adt.NewBytesAffineInterval([]byte("c"), []byte{})},
[]byte("f"),
true,
},
{
[]adt.Interval{adt.NewBytesAffineInterval([]byte("a"), []byte("d")), adt.NewBytesAffineInterval([]byte("a"), []byte("b")), adt.NewBytesAffineInterval([]byte{0x00}, []byte{})},
[]byte("f"),
true,
},
}
for i, tt := range tests {
@ -130,88 +100,3 @@ func TestKeyPermission(t *testing.T) {
}
}
}
func TestRangeCheck(t *testing.T) {
tests := []struct {
name string
key []byte
rangeEnd []byte
want bool
}{
{
name: "valid single key",
key: []byte("a"),
rangeEnd: []byte(""),
want: true,
},
{
name: "valid single key",
key: []byte("a"),
rangeEnd: nil,
want: true,
},
{
name: "valid key range, key < rangeEnd",
key: []byte("a"),
rangeEnd: []byte("b"),
want: true,
},
{
name: "invalid empty key range, key == rangeEnd",
key: []byte("a"),
rangeEnd: []byte("a"),
want: false,
},
{
name: "invalid empty key range, key > rangeEnd",
key: []byte("b"),
rangeEnd: []byte("a"),
want: false,
},
{
name: "invalid key, key must not be \"\"",
key: []byte(""),
rangeEnd: []byte("a"),
want: false,
},
{
name: "invalid key range, key must not be \"\"",
key: []byte(""),
rangeEnd: []byte(""),
want: false,
},
{
name: "invalid key range, key must not be \"\"",
key: []byte(""),
rangeEnd: []byte("\x00"),
want: false,
},
{
name: "valid single key (not useful in practice)",
key: []byte("\x00"),
rangeEnd: []byte(""),
want: true,
},
{
name: "valid key range, larger or equals to \"a\"",
key: []byte("a"),
rangeEnd: []byte("\x00"),
want: true,
},
{
name: "valid key range, which includes all keys",
key: []byte("\x00"),
rangeEnd: []byte("\x00"),
want: true,
},
}
for i, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := isValidPermissionRange(tt.key, tt.rangeEnd)
if result != tt.want {
t.Errorf("#%d: result=%t, want=%t", i, result, tt.want)
}
})
}
}

@ -159,11 +159,6 @@ func (t *tokenSimple) invalidateUser(username string) {
}
func (t *tokenSimple) enable() {
t.simpleTokensMu.Lock()
defer t.simpleTokensMu.Unlock()
if t.simpleTokenKeeper != nil { // already enabled
return
}
if t.simpleTokenTTL <= 0 {
t.simpleTokenTTL = simpleTokenTTLDefault
}

@ -59,7 +59,6 @@ var (
ErrRoleAlreadyExist = errors.New("auth: role already exists")
ErrRoleNotFound = errors.New("auth: role not found")
ErrRoleEmpty = errors.New("auth: role name is empty")
ErrPermissionNotGiven = errors.New("auth: permission not given")
ErrAuthFailed = errors.New("auth: authentication failed, invalid user ID or password")
ErrNoPasswordUser = errors.New("auth: authentication failed, password was given for no password user")
ErrPermissionDenied = errors.New("auth: permission denied")
@ -216,14 +215,7 @@ type authStore struct {
enabled bool
enabledMu sync.RWMutex
// rangePermCache needs to be protected by rangePermCacheMu
// rangePermCacheMu needs to be write locked only in initialization phase or configuration changes
// Hot paths like Range(), needs to acquire read lock for improving performance
//
// Note that BatchTx and ReadTx cannot be a mutex for rangePermCache because they are independent resources
// see also: https://github.com/etcd-io/etcd/pull/13920#discussion_r849114855
rangePermCache map[string]*unifiedRangePermissions // username -> unifiedRangePermissions
rangePermCacheMu sync.RWMutex
tokenProvider TokenProvider
syncConsistentIndex saveConsistentIndexFunc
@ -266,7 +258,7 @@ func (as *authStore) AuthEnable() error {
as.enabled = true
as.tokenProvider.enable()
as.refreshRangePermCache(tx)
as.rangePermCache = make(map[string]*unifiedRangePermissions)
as.setRevision(getRevision(tx))
@ -403,15 +395,11 @@ func (as *authStore) Recover(be backend.Backend) {
}
as.setRevision(getRevision(tx))
as.refreshRangePermCache(tx)
tx.Unlock()
as.enabledMu.Lock()
as.enabled = enabled
if enabled {
as.tokenProvider.enable()
}
as.enabledMu.Unlock()
}
@ -466,7 +454,6 @@ func (as *authStore) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse,
as.commitRevision(tx)
as.saveConsistentIndex(tx)
as.refreshRangePermCache(tx)
if as.lg != nil {
as.lg.Info("added a user", zap.String("user-name", r.Name))
@ -499,8 +486,8 @@ func (as *authStore) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDelete
as.commitRevision(tx)
as.saveConsistentIndex(tx)
as.refreshRangePermCache(tx)
as.invalidateCachedPerm(r.Name)
as.tokenProvider.invalidateUser(r.Name)
if as.lg != nil {
@ -552,8 +539,8 @@ func (as *authStore) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*p
as.commitRevision(tx)
as.saveConsistentIndex(tx)
as.refreshRangePermCache(tx)
as.invalidateCachedPerm(r.Name)
as.tokenProvider.invalidateUser(r.Name)
if as.lg != nil {
@ -605,9 +592,10 @@ func (as *authStore) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUser
putUser(as.lg, tx, user)
as.invalidateCachedPerm(r.User)
as.commitRevision(tx)
as.saveConsistentIndex(tx)
as.refreshRangePermCache(tx)
if as.lg != nil {
as.lg.Info(
@ -691,9 +679,10 @@ func (as *authStore) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUs
putUser(as.lg, tx, updatedUser)
as.invalidateCachedPerm(r.Name)
as.commitRevision(tx)
as.saveConsistentIndex(tx)
as.refreshRangePermCache(tx)
if as.lg != nil {
as.lg.Info(
@ -763,9 +752,12 @@ func (as *authStore) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest)
putRole(as.lg, tx, updatedRole)
// TODO(mitake): currently single role update invalidates every cache
// It should be optimized.
as.clearCachedPerm()
as.commitRevision(tx)
as.saveConsistentIndex(tx)
as.refreshRangePermCache(tx)
if as.lg != nil {
as.lg.Info(
@ -821,11 +813,11 @@ func (as *authStore) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDelete
putUser(as.lg, tx, updatedUser)
as.invalidateCachedPerm(string(user.Name))
}
as.commitRevision(tx)
as.saveConsistentIndex(tx)
as.refreshRangePermCache(tx)
if as.lg != nil {
as.lg.Info("deleted a role", zap.String("role-name", r.Role))
@ -885,13 +877,6 @@ func (perms permSlice) Swap(i, j int) {
}
func (as *authStore) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) {
if r.Perm == nil {
return nil, ErrPermissionNotGiven
}
if !isValidPermissionRange(r.Perm.Key, r.Perm.RangeEnd) {
return nil, ErrInvalidAuthMgmt
}
tx := as.be.BatchTx()
tx.Lock()
defer tx.Unlock()
@ -922,9 +907,12 @@ func (as *authStore) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (
putRole(as.lg, tx, role)
// TODO(mitake): currently single role update invalidates every cache
// It should be optimized.
as.clearCachedPerm()
as.commitRevision(tx)
as.saveConsistentIndex(tx)
as.refreshRangePermCache(tx)
if as.lg != nil {
as.lg.Info(
@ -985,7 +973,7 @@ func (as *authStore) isOpPermitted(userName string, revision uint64, key, rangeE
return nil
}
if as.isRangeOpPermitted(userName, key, rangeEnd, permTyp) {
if as.isRangeOpPermitted(tx, userName, key, rangeEnd, permTyp) {
return nil
}
@ -1051,15 +1039,7 @@ func getUser(lg *zap.Logger, tx backend.BatchTx, username string) *authpb.User {
}
func getAllUsers(lg *zap.Logger, tx backend.BatchTx) []*authpb.User {
var vs [][]byte
err := tx.UnsafeForEach(authUsersBucketName, func(k []byte, v []byte) error {
vs = append(vs, v)
return nil
})
if err != nil {
lg.Panic("failed to get users",
zap.Error(err))
}
_, vs := tx.UnsafeRange(authUsersBucketName, []byte{0}, []byte{0xff}, -1)
if len(vs) == 0 {
return nil
}
@ -1212,8 +1192,6 @@ func NewAuthStore(lg *zap.Logger, be backend.Backend, tp TokenProvider, bcryptCo
as.setupMetricsReporter()
as.refreshRangePermCache(tx)
tx.Unlock()
be.ForceCommit()

@ -16,7 +16,6 @@ package auth
import (
"context"
"errors"
"fmt"
"os"
"reflect"
@ -29,7 +28,6 @@ import (
"go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
"go.etcd.io/etcd/mvcc/backend"
"go.etcd.io/etcd/pkg/adt"
"go.uber.org/zap"
"golang.org/x/crypto/bcrypt"
@ -153,8 +151,7 @@ func TestUserAdd(t *testing.T) {
as, tearDown := setupAuthStore(t)
defer tearDown(t)
const userName = "foo"
ua := &pb.AuthUserAddRequest{Name: userName, Options: &authpb.UserAddOptions{NoPassword: false}}
ua := &pb.AuthUserAddRequest{Name: "foo", Options: &authpb.UserAddOptions{NoPassword: false}}
_, err := as.UserAdd(ua) // add an existing user
if err == nil {
t.Fatalf("expected %v, got %v", ErrUserAlreadyExist, err)
@ -168,11 +165,6 @@ func TestUserAdd(t *testing.T) {
if err != ErrUserEmpty {
t.Fatal(err)
}
if _, ok := as.rangePermCache[userName]; !ok {
t.Fatalf("user %s should be added but it doesn't exist in rangePermCache", userName)
}
}
func TestRecover(t *testing.T) {
@ -187,30 +179,6 @@ func TestRecover(t *testing.T) {
}
}
func TestRecoverWithEmptyRangePermCache(t *testing.T) {
as, tearDown := setupAuthStore(t)
defer as.Close()
defer tearDown(t)
as.enabled = false
as.rangePermCache = map[string]*unifiedRangePermissions{}
as.Recover(as.be)
if !as.IsAuthEnabled() {
t.Fatalf("expected auth enabled got disabled")
}
if len(as.rangePermCache) != 2 {
t.Fatalf("rangePermCache should have permission information for 2 users (\"root\" and \"foo\"), but has %d information", len(as.rangePermCache))
}
if _, ok := as.rangePermCache["root"]; !ok {
t.Fatal("user \"root\" should be created by setupAuthStore() but doesn't exist in rangePermCache")
}
if _, ok := as.rangePermCache["foo"]; !ok {
t.Fatal("user \"foo\" should be created by setupAuthStore() but doesn't exist in rangePermCache")
}
}
func TestCheckPassword(t *testing.T) {
as, tearDown := setupAuthStore(t)
defer tearDown(t)
@ -245,8 +213,7 @@ func TestUserDelete(t *testing.T) {
defer tearDown(t)
// delete an existing user
const userName = "foo"
ud := &pb.AuthUserDeleteRequest{Name: userName}
ud := &pb.AuthUserDeleteRequest{Name: "foo"}
_, err := as.UserDelete(ud)
if err != nil {
t.Fatal(err)
@ -260,47 +227,6 @@ func TestUserDelete(t *testing.T) {
if err != ErrUserNotFound {
t.Fatalf("expected %v, got %v", ErrUserNotFound, err)
}
if _, ok := as.rangePermCache[userName]; ok {
t.Fatalf("user %s should be deleted but it exists in rangePermCache", userName)
}
}
func TestUserDeleteAndPermCache(t *testing.T) {
as, tearDown := setupAuthStore(t)
defer tearDown(t)
// delete an existing user
const deletedUserName = "foo"
ud := &pb.AuthUserDeleteRequest{Name: deletedUserName}
_, err := as.UserDelete(ud)
if err != nil {
t.Fatal(err)
}
// delete a non-existing user
_, err = as.UserDelete(ud)
if err != ErrUserNotFound {
t.Fatalf("expected %v, got %v", ErrUserNotFound, err)
}
if _, ok := as.rangePermCache[deletedUserName]; ok {
t.Fatalf("user %s should be deleted but it exists in rangePermCache", deletedUserName)
}
// add a new user
const newUser = "bar"
ua := &pb.AuthUserAddRequest{Name: newUser, Options: &authpb.UserAddOptions{NoPassword: false}}
_, err = as.UserAdd(ua)
if err != nil {
t.Fatal(err)
}
if _, ok := as.rangePermCache[newUser]; !ok {
t.Fatalf("user %s should exist but it doesn't exist in rangePermCache", deletedUserName)
}
}
func TestUserChangePassword(t *testing.T) {
@ -519,162 +445,6 @@ func TestRoleGrantPermission(t *testing.T) {
if !reflect.DeepEqual(perm, r.Perm[0]) {
t.Errorf("expected %v, got %v", perm, r.Perm[0])
}
// trying to grant nil permissions returns an error (and doesn't change the actual permissions!)
_, err = as.RoleGrantPermission(&pb.AuthRoleGrantPermissionRequest{
Name: "role-test-1",
})
if err != ErrPermissionNotGiven {
t.Error(err)
}
r, err = as.RoleGet(&pb.AuthRoleGetRequest{Role: "role-test-1"})
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(perm, r.Perm[0]) {
t.Errorf("expected %v, got %v", perm, r.Perm[0])
}
}
func TestRoleGrantInvalidPermission(t *testing.T) {
as, tearDown := setupAuthStore(t)
defer tearDown(t)
_, err := as.RoleAdd(&pb.AuthRoleAddRequest{Name: "role-test-1"})
if err != nil {
t.Fatal(err)
}
tests := []struct {
name string
perm *authpb.Permission
want error
}{
{
name: "valid range",
perm: &authpb.Permission{
PermType: authpb.WRITE,
Key: []byte("Keys"),
RangeEnd: []byte("RangeEnd"),
},
want: nil,
},
{
name: "invalid range: nil key",
perm: &authpb.Permission{
PermType: authpb.WRITE,
Key: nil,
RangeEnd: []byte("RangeEnd"),
},
want: ErrInvalidAuthMgmt,
},
{
name: "valid range: single key",
perm: &authpb.Permission{
PermType: authpb.WRITE,
Key: []byte("Keys"),
RangeEnd: nil,
},
want: nil,
},
{
name: "valid range: single key",
perm: &authpb.Permission{
PermType: authpb.WRITE,
Key: []byte("Keys"),
RangeEnd: []byte{},
},
want: nil,
},
{
name: "invalid range: empty (Key == RangeEnd)",
perm: &authpb.Permission{
PermType: authpb.WRITE,
Key: []byte("a"),
RangeEnd: []byte("a"),
},
want: ErrInvalidAuthMgmt,
},
{
name: "invalid range: empty (Key > RangeEnd)",
perm: &authpb.Permission{
PermType: authpb.WRITE,
Key: []byte("b"),
RangeEnd: []byte("a"),
},
want: ErrInvalidAuthMgmt,
},
{
name: "invalid range: length of key is 0",
perm: &authpb.Permission{
PermType: authpb.WRITE,
Key: []byte(""),
RangeEnd: []byte("a"),
},
want: ErrInvalidAuthMgmt,
},
{
name: "invalid range: length of key is 0",
perm: &authpb.Permission{
PermType: authpb.WRITE,
Key: []byte(""),
RangeEnd: []byte(""),
},
want: ErrInvalidAuthMgmt,
},
{
name: "invalid range: length of key is 0",
perm: &authpb.Permission{
PermType: authpb.WRITE,
Key: []byte(""),
RangeEnd: []byte{0x00},
},
want: ErrInvalidAuthMgmt,
},
{
name: "valid range: single key permission for []byte{0x00}",
perm: &authpb.Permission{
PermType: authpb.WRITE,
Key: []byte{0x00},
RangeEnd: []byte(""),
},
want: nil,
},
{
name: "valid range: \"a\" or larger keys",
perm: &authpb.Permission{
PermType: authpb.WRITE,
Key: []byte("a"),
RangeEnd: []byte{0x00},
},
want: nil,
},
{
name: "valid range: the entire keys",
perm: &authpb.Permission{
PermType: authpb.WRITE,
Key: []byte{0x00},
RangeEnd: []byte{0x00},
},
want: nil,
},
}
for i, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
_, err = as.RoleGrantPermission(&pb.AuthRoleGrantPermissionRequest{
Name: "role-test-1",
Perm: tt.perm,
})
if !errors.Is(err, tt.want) {
t.Errorf("#%d: result=%t, want=%t", i, err, tt.want)
}
})
}
}
func TestRoleRevokePermission(t *testing.T) {
@ -733,44 +503,17 @@ func TestUserRevokePermission(t *testing.T) {
t.Fatal(err)
}
const userName = "foo"
_, err = as.UserGrantRole(&pb.AuthUserGrantRoleRequest{User: userName, Role: "role-test"})
_, err = as.UserGrantRole(&pb.AuthUserGrantRoleRequest{User: "foo", Role: "role-test"})
if err != nil {
t.Fatal(err)
}
_, err = as.UserGrantRole(&pb.AuthUserGrantRoleRequest{User: userName, Role: "role-test-1"})
_, err = as.UserGrantRole(&pb.AuthUserGrantRoleRequest{User: "foo", Role: "role-test-1"})
if err != nil {
t.Fatal(err)
}
perm := &authpb.Permission{
PermType: authpb.WRITE,
Key: []byte("WriteKeyBegin"),
RangeEnd: []byte("WriteKeyEnd"),
}
_, err = as.RoleGrantPermission(&pb.AuthRoleGrantPermissionRequest{
Name: "role-test-1",
Perm: perm,
})
if err != nil {
t.Fatal(err)
}
if _, ok := as.rangePermCache[userName]; !ok {
t.Fatalf("User %s should have its entry in rangePermCache", userName)
}
unifiedPerm := as.rangePermCache[userName]
pt1 := adt.NewBytesAffinePoint([]byte("WriteKeyBegin"))
if !unifiedPerm.writePerms.Contains(pt1) {
t.Fatal("rangePermCache should contain WriteKeyBegin")
}
pt2 := adt.NewBytesAffinePoint([]byte("OutOfRange"))
if unifiedPerm.writePerms.Contains(pt2) {
t.Fatal("rangePermCache should not contain OutOfRange")
}
u, err := as.UserGet(&pb.AuthUserGetRequest{Name: userName})
u, err := as.UserGet(&pb.AuthUserGetRequest{Name: "foo"})
if err != nil {
t.Fatal(err)
}
@ -780,12 +523,12 @@ func TestUserRevokePermission(t *testing.T) {
t.Fatalf("expected %v, got %v", expected, u.Roles)
}
_, err = as.UserRevokeRole(&pb.AuthUserRevokeRoleRequest{Name: userName, Role: "role-test-1"})
_, err = as.UserRevokeRole(&pb.AuthUserRevokeRoleRequest{Name: "foo", Role: "role-test-1"})
if err != nil {
t.Fatal(err)
}
u, err = as.UserGet(&pb.AuthUserGetRequest{Name: userName})
u, err = as.UserGet(&pb.AuthUserGetRequest{Name: "foo"})
if err != nil {
t.Fatal(err)
}

@ -44,6 +44,15 @@
}
]
},
{
"project": "github.com/dgrijalva/jwt-go",
"licenses": [
{
"type": "MIT License",
"confidence": 0.9891304347826086
}
]
},
{
"project": "github.com/dustin/go-humanize",
"licenses": [
@ -62,15 +71,6 @@
}
]
},
{
"project": "github.com/golang-jwt/jwt",
"licenses": [
{
"type": "MIT License",
"confidence": 0.9891304347826086
}
]
},
{
"project": "github.com/golang/groupcache/lru",
"licenses": [
@ -378,7 +378,7 @@
]
},
{
"project": "golang.org/x/sys",
"project": "golang.org/x/sys/unix",
"licenses": [
{
"type": "BSD 3-clause \"New\" or \"Revised\" License",

build

@ -1,24 +1,22 @@
#!/usr/bin/env bash
set -euo pipefail
# set some environment variables
ORG_PATH="go.etcd.io"
REPO_PATH="${ORG_PATH}/etcd"
GIT_SHA=$(git rev-parse --short HEAD || echo "GitNotFound")
if [[ -n "${FAILPOINTS:-}" ]]; then
if [[ -n "$FAILPOINTS" ]]; then
GIT_SHA="$GIT_SHA"-FAILPOINTS
fi
# Set GO_LDFLAGS="-s" for building without symbols for debugging.
GO_LDFLAGS="${GO_LDFLAGS:-} -X ${REPO_PATH}/version.GitSHA=${GIT_SHA}"
GO_LDFLAGS="$GO_LDFLAGS -X ${REPO_PATH}/version.GitSHA=${GIT_SHA}"
# enable/disable failpoints
toggle_failpoints() {
mode="$1"
if command -v gofail >/dev/null 2>&1; then
gofail "$mode" etcdserver/ mvcc/backend/ wal/
gofail "$mode" etcdserver/ mvcc/backend/
elif [[ "$mode" != "disable" ]]; then
echo "FAILPOINTS set but gofail not found"
exit 1
@ -32,7 +30,7 @@ etcd_setup_gopath() {
cd "$CDIR" || return
etcdGOPATH="${CDIR}/gopath"
# preserve old gopath to support building with unvendored tooling deps (e.g., gofail)
if [[ -n "${GOPATH:-}" ]]; then
if [[ -n "$GOPATH" ]]; then
GOPATH=":$GOPATH"
fi
rm -rf "${etcdGOPATH:?}/"
@ -44,23 +42,23 @@ etcd_setup_gopath() {
toggle_failpoints_default() {
mode="disable"
if [[ -n "${FAILPOINTS:-}" ]]; then mode="enable"; fi
if [[ -n "$FAILPOINTS" ]]; then mode="enable"; fi
toggle_failpoints "$mode"
}
etcd_build() {
out="bin"
if [[ -n "${BINDIR:-}" ]]; then out="${BINDIR}"; fi
if [[ -n "${BINDIR}" ]]; then out="${BINDIR}"; fi
toggle_failpoints_default
# Static compilation is useful when etcd is run in a container. $GO_BUILD_FLAGS is OK
# shellcheck disable=SC2086
CGO_ENABLED=0 go build ${GO_BUILD_FLAGS:-} \
CGO_ENABLED=0 go build $GO_BUILD_FLAGS \
-installsuffix cgo \
-ldflags "$GO_LDFLAGS" \
-o "${out}/etcd" ${REPO_PATH} || return
# shellcheck disable=SC2086
CGO_ENABLED=0 go build ${GO_BUILD_FLAGS:-} \
CGO_ENABLED=0 go build $GO_BUILD_FLAGS \
-installsuffix cgo \
-ldflags "$GO_LDFLAGS" \
-o "${out}/etcdctl" ${REPO_PATH}/etcdctl || return
@ -68,7 +66,7 @@ etcd_build() {
tools_build() {
out="bin"
if [[ -n "${BINDIR:-}" ]]; then out="${BINDIR}"; fi
if [[ -n "${BINDIR}" ]]; then out="${BINDIR}"; fi
tools_path="tools/benchmark
tools/etcd-dump-db
tools/etcd-dump-logs
@ -90,7 +88,7 @@ tools_build() {
toggle_failpoints_default
if [[ "${ETCD_SETUP_GOPATH:-}" == "1" ]]; then
if [[ "${ETCD_SETUP_GOPATH}" == "1" ]]; then
etcd_setup_gopath
fi

@ -68,5 +68,6 @@ Use a custom context to set timeouts on your operations:
// handle error
}
}
*/
package client

@ -19,6 +19,7 @@ import (
"fmt"
"strings"
"testing"
"time"
"go.etcd.io/etcd/clientv3/balancer/picker"
"go.etcd.io/etcd/clientv3/balancer/resolver/endpoint"
@ -91,25 +92,24 @@ func TestRoundRobinBalancedResolvableNoFailover(t *testing.T) {
return picked, err
}
_, picked, err := warmupConnections(reqFunc, tc.serverCount, "")
if err != nil {
t.Fatalf("Unexpected failure %v", err)
}
// verify that we round robin
prev, switches := picked, 0
prev, switches := "", 0
for i := 0; i < tc.reqN; i++ {
picked, err = reqFunc(context.Background())
picked, err := reqFunc(context.Background())
if err != nil {
t.Fatalf("#%d: unexpected failure %v", i, err)
}
if prev == "" {
prev = picked
continue
}
if prev != picked {
switches++
}
prev = picked
}
if tc.serverCount > 1 && switches != tc.reqN {
t.Fatalf("expected balanced loads for %d requests, got switches %d", tc.reqN, switches)
if tc.serverCount > 1 && switches < tc.reqN-3 { // -3 for initial resolutions
// TODO: FIX ME
t.Skipf("expected balanced loads for %d requests, got switches %d", tc.reqN, switches)
}
})
}
@ -160,21 +160,26 @@ func TestRoundRobinBalancedResolvableFailoverFromServerFail(t *testing.T) {
}
// stop first server, loads should be redistributed
// stopped server should never be picked
ms.StopAt(0)
// the stopped server will be transitioned into the TRANSIENT_FAILURE state,
// but that doesn't happen instantaneously and it can still be picked for a short period of time,
// so we ignore "transport is closing" in that case
available, picked, err := warmupConnections(reqFunc, serverCount-1, "transport is closing")
if err != nil {
t.Fatalf("Unexpected failure %v", err)
available := make(map[string]struct{})
for i := 1; i < serverCount; i++ {
available[eps[i]] = struct{}{}
}
reqN := 10
prev, switches := picked, 0
prev, switches := "", 0
for i := 0; i < reqN; i++ {
picked, err = reqFunc(context.Background())
if err != nil {
t.Fatalf("#%d: unexpected failure %v", i, err)
picked, err := reqFunc(context.Background())
if err != nil && strings.Contains(err.Error(), "transport is closing") {
continue
}
if prev == "" { // first failover
if eps[0] == picked {
t.Fatalf("expected failover from %q, picked %q", eps[0], picked)
}
prev = picked
continue
}
if _, ok := available[picked]; !ok {
t.Fatalf("picked unavailable address %q (available %v)", picked, available)
@ -184,18 +189,18 @@ func TestRoundRobinBalancedResolvableFailoverFromServerFail(t *testing.T) {
}
prev = picked
}
if switches != reqN {
t.Fatalf("expected balanced loads for %d requests, got switches %d", reqN, switches)
if switches < reqN-3 { // -3 for initial resolutions + failover
// TODO: FIX ME!
t.Skipf("expected balanced loads for %d requests, got switches %d", reqN, switches)
}
// now failed server comes back
ms.StartAt(0)
available, picked, err = warmupConnections(reqFunc, serverCount, "")
if err != nil {
t.Fatalf("Unexpected failure %v", err)
}
prev, switches = picked, 0
// enough time for reconnecting to recovered server
time.Sleep(time.Second)
prev, switches = "", 0
recoveredAddr, recovered := eps[0], 0
available[recoveredAddr] = struct{}{}
@ -204,6 +209,10 @@ func TestRoundRobinBalancedResolvableFailoverFromServerFail(t *testing.T) {
if err != nil {
t.Fatalf("#%d: unexpected failure %v", i, err)
}
if prev == "" {
prev = picked
continue
}
if _, ok := available[picked]; !ok {
t.Fatalf("#%d: picked unavailable address %q (available %v)", i, picked, available)
}
@ -215,10 +224,10 @@ func TestRoundRobinBalancedResolvableFailoverFromServerFail(t *testing.T) {
}
prev = picked
}
if switches != 2*reqN {
if switches < reqN-3 { // -3 for initial resolutions
t.Fatalf("expected balanced loads for %d requests, got switches %d", reqN, switches)
}
if recovered != 2*reqN/serverCount {
if recovered < reqN/serverCount {
t.Fatalf("recovered server %q got only %d requests", recoveredAddr, recovered)
}
}
@ -233,10 +242,11 @@ func TestRoundRobinBalancedResolvableFailoverFromRequestFail(t *testing.T) {
}
defer ms.Stop()
var eps []string
available := make(map[string]struct{})
for _, svr := range ms.Servers {
eps = append(eps, svr.ResolverAddress().Addr)
available[svr.Address] = struct{}{}
}
rsv, err := endpoint.NewResolverGroup("requestfail")
if err != nil {
t.Fatal(err)
@ -267,11 +277,6 @@ func TestRoundRobinBalancedResolvableFailoverFromRequestFail(t *testing.T) {
return picked, err
}
available, picked, err := warmupConnections(reqFunc, serverCount, "")
if err != nil {
t.Fatalf("Unexpected failure %v", err)
}
reqN := 20
prev, switches := "", 0
for i := 0; i < reqN; i++ {
@ -280,13 +285,17 @@ func TestRoundRobinBalancedResolvableFailoverFromRequestFail(t *testing.T) {
if i%2 == 0 {
cancel()
}
picked, err = reqFunc(ctx)
picked, err := reqFunc(ctx)
if i%2 == 0 {
if s, ok := status.FromError(err); ok && s.Code() != codes.Canceled {
if s, ok := status.FromError(err); ok && s.Code() != codes.Canceled || picked != "" {
t.Fatalf("#%d: expected %v, got %v", i, context.Canceled, err)
}
continue
}
if prev == "" && picked != "" {
prev = picked
continue
}
if _, ok := available[picked]; !ok {
t.Fatalf("#%d: picked unavailable address %q (available %v)", i, picked, available)
}
@ -295,29 +304,7 @@ func TestRoundRobinBalancedResolvableFailoverFromRequestFail(t *testing.T) {
}
prev = picked
}
if switches != reqN/2 {
if switches < reqN/2-3 { // -3 for initial resolutions + failover
t.Fatalf("expected balanced loads for %d requests, got switches %d", reqN, switches)
}
}
type reqFuncT = func(ctx context.Context) (picked string, err error)
func warmupConnections(reqFunc reqFuncT, serverCount int, ignoreErr string) (map[string]struct{}, string, error) {
var picked string
var err error
available := make(map[string]struct{})
// cycle through all peers to indirectly verify that balancer subconn list is fully loaded
// otherwise we can't reliably count switches between 'picked' peers in the test assert phase
for len(available) < serverCount {
picked, err = reqFunc(context.Background())
if err != nil {
if ignoreErr != "" && strings.Contains(err.Error(), ignoreErr) {
// skip ignored errors
continue
}
return available, picked, err
}
available[picked] = struct{}{}
}
return available, picked, err
}

@ -174,10 +174,8 @@ func (c *Client) Sync(ctx context.Context) error {
}
var eps []string
for _, m := range mresp.Members {
if len(m.Name) != 0 && !m.IsLearner {
eps = append(eps, m.ClientURLs...)
}
}
c.SetEndpoints(eps...)
return nil
}

@ -22,7 +22,6 @@ import (
"time"
"go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
"go.etcd.io/etcd/etcdserver/etcdserverpb"
"go.etcd.io/etcd/pkg/testutil"
"google.golang.org/grpc"
@ -167,51 +166,3 @@ func TestCloseCtxClient(t *testing.T) {
t.Errorf("failed to Close the client. %v", err)
}
}
func TestSyncFiltersMembers(t *testing.T) {
defer testutil.AfterTest(t)
c, _ := New(Config{Endpoints: []string{"http://254.0.0.1:12345"}})
c.Cluster = &mockCluster{
[]*etcdserverpb.Member{
{ID: 0, Name: "", ClientURLs: []string{"http://254.0.0.1:12345"}, IsLearner: false},
{ID: 1, Name: "isStarted", ClientURLs: []string{"http://254.0.0.2:12345"}, IsLearner: true},
{ID: 2, Name: "isStartedAndNotLearner", ClientURLs: []string{"http://254.0.0.3:12345"}, IsLearner: false},
},
}
c.Sync(context.Background())
endpoints := c.Endpoints()
if len(endpoints) != 1 || endpoints[0] != "http://254.0.0.3:12345" {
t.Error("Client.Sync uses learner and/or non-started member client URLs")
}
c.Close()
}
type mockCluster struct {
members []*etcdserverpb.Member
}
func (mc *mockCluster) MemberList(ctx context.Context) (*MemberListResponse, error) {
return &MemberListResponse{Members: mc.members}, nil
}
func (mc *mockCluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) {
return nil, nil
}
func (mc *mockCluster) MemberAddAsLearner(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) {
return nil, nil
}
func (mc *mockCluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) {
return nil, nil
}
func (mc *mockCluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) {
return nil, nil
}
func (mc *mockCluster) MemberPromote(ctx context.Context, id uint64) (*MemberPromoteResponse, error) {
return nil, nil
}

@ -65,10 +65,14 @@ func TestResumeElection(t *testing.T) {
respChan := make(chan *clientv3.GetResponse)
go func() {
defer close(respChan)
o := e.Observe(ctx)
respChan <- nil
for resp := range o {
for {
select {
case resp, ok := <-o:
if !ok {
t.Fatal("Observe() channel closed prematurely")
}
// Ignore any observations that candidate1 was elected
if string(resp.Kvs[0].Value) == "candidate1" {
continue
@ -76,7 +80,7 @@ func TestResumeElection(t *testing.T) {
respChan <- &resp
return
}
t.Error("Observe() channel closed prematurely")
}
}()
// wait until observe goroutine is running

@ -102,4 +102,5 @@
// The grpc load balancer is registered statically and is shared across etcd clients.
// To enable detailed load balancer logging, set the ETCD_CLIENT_DEBUG environment
// variable. E.g. "ETCD_CLIENT_DEBUG=1".
//
package clientv3

@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !cluster_proxy
// +build !cluster_proxy
package integration

@ -619,28 +619,16 @@ func TestLeasingTxnOwnerGet(t *testing.T) {
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
client := clus.Client(0)
lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
testutil.AssertNil(t, err)
defer func() {
// In '--tags cluster_proxy' mode the client needs to be closed before
// closeLKV(). This interrupts all outstanding watches. Closing by closeLKV()
// is not sufficient as (unfortunately) closing the context does not interrupt Watches.
// See ./clientv3/watch.go:
// >> Currently, client contexts are overwritten with "valCtx" that never closes. <<
clus.TakeClient(0) // avoid double Close() of the client.
client.Close()
closeLKV()
}()
defer closeLKV()
keyCount := rand.Intn(10) + 1
var ops []clientv3.Op
presps := make([]*clientv3.PutResponse, keyCount)
for i := range presps {
k := fmt.Sprintf("k-%d", i)
presp, err := client.Put(context.TODO(), k, k+k)
presp, err := clus.Client(0).Put(context.TODO(), k, k+k)
if err != nil {
t.Fatal(err)
}

@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !cluster_proxy
// +build !cluster_proxy
package integration

@ -114,7 +114,7 @@ func authSetupRoot(t *testing.T, auth clientv3.Auth) {
func TestGetTokenWithoutAuth(t *testing.T) {
defer testutil.AfterTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2})
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 10})
defer clus.Terminate(t)
authapi := clus.RandClient()
@ -130,7 +130,7 @@ func TestGetTokenWithoutAuth(t *testing.T) {
// "Username" and "Password" must be used
cfg := clientv3.Config{
Endpoints: authapi.Endpoints(),
DialTimeout: 5 * time.Second,
DialTimeout: 1 * time.Second, // ensure the total time to try all endpoints exceeds DialTimeout
Username: "root",
Password: "123",
}
@ -142,7 +142,7 @@ func TestGetTokenWithoutAuth(t *testing.T) {
switch err {
case nil:
t.Log("passes as expected")
t.Log("passes as expected, but may be connection time less than DialTimeout")
case context.DeadlineExceeded:
t.Errorf("not expected result:%v with endpoint:%s", err, authapi.Endpoints())
case rpctypes.ErrAuthNotEnabled:
@ -150,4 +150,5 @@ func TestGetTokenWithoutAuth(t *testing.T) {
default:
t.Errorf("other errors:%v", err)
}
}

@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !cluster_proxy
// +build !cluster_proxy
package integration

@ -338,9 +338,6 @@ func putAndWatch(t *testing.T, wctx *watchctx, key, val string) {
if !ok {
t.Fatalf("unexpected watch close")
}
if err := v.Err(); err != nil {
t.Fatalf("unexpected watch response error: %v", err)
}
if string(v.Events[0].Kv.Value) != val {
t.Fatalf("bad value got %v, wanted %v", v.Events[0].Kv.Value, val)
}
@ -610,9 +607,6 @@ func TestConfigurableWatchProgressNotifyInterval(t *testing.T) {
}
func TestWatchRequestProgress(t *testing.T) {
if integration.ThroughProxy {
t.Skip("grpc-proxy does not support WatchProgress yet")
}
testCases := []struct {
name string
watchers []string

@ -42,4 +42,5 @@
// }
// lkv2.Put(context.TODO(), "abc", "456")
// resp, err = lkv.Get("abc")
//
package leasing

@ -39,4 +39,5 @@
// resp, _ = cli.Get(context.TODO(), "abc")
// fmt.Printf("%s\n", resp.Kvs[0].Value)
// // Output: 456
//
package namespace

@ -52,4 +52,5 @@
// r := &etcdnaming.GRPCResolver{Client: c}
// return r.Update(c.Ctx(), service, naming.Update{Op: naming.Add, Addr: addr}, clientv3.WithLease(lid))
// }
//
package naming

@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package naming_test
package naming
import (
"context"
@ -21,7 +21,6 @@ import (
"testing"
etcd "go.etcd.io/etcd/clientv3"
namingv3 "go.etcd.io/etcd/clientv3/naming"
"go.etcd.io/etcd/integration"
"go.etcd.io/etcd/pkg/testutil"
@ -34,7 +33,7 @@ func TestGRPCResolver(t *testing.T) {
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t)
r := namingv3.GRPCResolver{
r := GRPCResolver{
Client: clus.RandClient(),
}
@ -108,7 +107,7 @@ func TestGRPCResolverMulti(t *testing.T) {
t.Fatal(err)
}
r := namingv3.GRPCResolver{c}
r := GRPCResolver{c}
w, err := r.Resolve("foo")
if err != nil {

@ -77,9 +77,6 @@ type Op struct {
cmps []Cmp
thenOps []Op
elseOps []Op
isOptsWithFromKey bool
isOptsWithPrefix bool
}
// accessors / mutators
@ -219,10 +216,6 @@ func (op Op) isWrite() bool {
return op.t != tRange
}
func NewOp() *Op {
return &Op{key: []byte("")}
}
// OpGet returns "get" operation based on given key and operation options.
func OpGet(key string, opts ...OpOption) Op {
// WithPrefix and WithFromKey are not supported together
@ -394,7 +387,6 @@ func WithPrefix() OpOption {
return
}
op.end = getPrefix(op.key)
op.isOptsWithPrefix = true
}
}
@ -414,7 +406,6 @@ func WithFromKey() OpOption {
op.key = []byte{0}
}
op.end = []byte("\x00")
op.isOptsWithFromKey = true
}
}
@ -563,21 +554,7 @@ func toLeaseTimeToLiveRequest(id LeaseID, opts ...LeaseOption) *pb.LeaseTimeToLi
}
// isWithPrefix returns true if WithPrefix is being called in the op
func isWithPrefix(opts []OpOption) bool {
ret := NewOp()
for _, opt := range opts {
opt(ret)
}
return ret.isOptsWithPrefix
}
func isWithPrefix(opts []OpOption) bool { return isOpFuncCalled("WithPrefix", opts) }
// isWithFromKey returns true if WithFromKey is being called in the op
func isWithFromKey(opts []OpOption) bool {
ret := NewOp()
for _, opt := range opts {
opt(ret)
}
return ret.isOptsWithFromKey
}
func isWithFromKey(opts []OpOption) bool { return isOpFuncCalled("WithFromKey", opts) }

@ -38,4 +38,5 @@
// cli.KV = ordering.NewKV(cli.KV, vf)
//
// Now calls using 'cli' will reject order violations with an error.
//
package ordering

@ -16,7 +16,8 @@ package ordering
import (
"errors"
"sync/atomic"
"sync"
"time"
"go.etcd.io/etcd/clientv3"
)
@ -25,18 +26,26 @@ type OrderViolationFunc func(op clientv3.Op, resp clientv3.OpResponse, prevRev i
var ErrNoGreaterRev = errors.New("etcdclient: no cluster members have a revision higher than the previously received revision")
func NewOrderViolationSwitchEndpointClosure(c *clientv3.Client) OrderViolationFunc {
violationCount := int32(0)
return func(_ clientv3.Op, _ clientv3.OpResponse, _ int64) error {
// Each request is assigned by the round-robin load-balancer's picker to a different
// endpoint. If we have cycled through them 5 times (even with some level of concurrency),
// with high probability no endpoint points to a member with fresh data.
// TODO: Ideally we should track members (resp.opp.Header) that returned
// stale result and explicitly temporarily disable them in 'picker'.
if atomic.LoadInt32(&violationCount) > int32(5*len(c.Endpoints())) {
func NewOrderViolationSwitchEndpointClosure(c clientv3.Client) OrderViolationFunc {
var mu sync.Mutex
violationCount := 0
return func(op clientv3.Op, resp clientv3.OpResponse, prevRev int64) error {
if violationCount > len(c.Endpoints()) {
return ErrNoGreaterRev
}
atomic.AddInt32(&violationCount, 1)
mu.Lock()
defer mu.Unlock()
eps := c.Endpoints()
// force client to connect to given endpoint by limiting to a single endpoint
c.SetEndpoints(eps[violationCount%len(eps)])
// give enough time for operation
time.Sleep(1 * time.Second)
// set available endpoints back to all endpoints to ensure
// the client has access to all the endpoints.
c.SetEndpoints(eps...)
// give enough time for operation
time.Sleep(1 * time.Second)
violationCount++
return nil
}
}

@ -64,19 +64,19 @@ func TestEndpointSwitchResolvesViolation(t *testing.T) {
// NewOrderViolationSwitchEndpointClosure will be able to
// access the full list of endpoints.
cli.SetEndpoints(eps...)
orderingKv := NewKV(cli.KV, NewOrderViolationSwitchEndpointClosure(cli))
OrderingKv := NewKV(cli.KV, NewOrderViolationSwitchEndpointClosure(*cli))
// set prevRev to the second member's revision of "foo" such that
// the revision is higher than the third member's revision of "foo"
_, err = orderingKv.Get(ctx, "foo")
_, err = OrderingKv.Get(ctx, "foo")
if err != nil {
t.Fatal(err)
}
t.Logf("Reconfigure client to speak only to the 'partitioned' member")
cli.SetEndpoints(clus.Members[2].GRPCAddr())
_, err = orderingKv.Get(ctx, "foo", clientv3.WithSerializable())
if err != ErrNoGreaterRev {
t.Fatal("While speaking to partitioned leader, we should get ErrNoGreaterRev error")
time.Sleep(1 * time.Second) // give enough time for operation
_, err = OrderingKv.Get(ctx, "foo", clientv3.WithSerializable())
if err != nil {
t.Fatalf("failed to resolve order violation %v", err)
}
}
@ -123,7 +123,7 @@ func TestUnresolvableOrderViolation(t *testing.T) {
// access the full list of endpoints.
cli.SetEndpoints(eps...)
time.Sleep(1 * time.Second) // give enough time for operation
OrderingKv := NewKV(cli.KV, NewOrderViolationSwitchEndpointClosure(cli))
OrderingKv := NewKV(cli.KV, NewOrderViolationSwitchEndpointClosure(*cli))
// set prevRev to the first member's revision of "foo" such that
// the revision is higher than the fourth and fifth members' revision of "foo"
_, err = OrderingKv.Get(ctx, "foo")

@ -73,8 +73,8 @@ func (c *Client) unaryClientInterceptor(logger *zap.Logger, optFuncs ...retryOpt
// it's the callCtx deadline or cancellation, in which case try again.
continue
}
if c.shouldRefreshToken(lastErr, callOpts) {
gterr := c.refreshToken(ctx)
if callOpts.retryAuth && rpctypes.Error(lastErr) == rpctypes.ErrInvalidAuthToken {
gterr := c.getToken(ctx)
if gterr != nil {
logger.Warn(
"retrying of unary invoker failed to fetch new auth token",
@ -142,37 +142,6 @@ func (c *Client) streamClientInterceptor(logger *zap.Logger, optFuncs ...retryOp
}
}
// shouldRefreshToken checks whether there's a need to refresh the token based on the error and callOptions,
// and returns a boolean value.
func (c *Client) shouldRefreshToken(err error, callOpts *options) bool {
if rpctypes.Error(err) == rpctypes.ErrUserEmpty {
// refresh the token when username, password is present but the server returns ErrUserEmpty
// which is possible when the client token is cleared somehow
return c.authTokenBundle != nil // equal to c.Username != "" && c.Password != ""
}
return callOpts.retryAuth &&
(rpctypes.Error(err) == rpctypes.ErrInvalidAuthToken || rpctypes.Error(err) == rpctypes.ErrAuthOldRevision)
}
func (c *Client) refreshToken(ctx context.Context) error {
if c.authTokenBundle == nil {
// c.authTokenBundle will be initialized only when
// c.Username != "" && c.Password != "".
//
// When users use the TLS CommonName based authentication, the
// authTokenBundle is always nil. But it's possible for the clients
// to get `rpctypes.ErrAuthOldRevision` response when the clients
// concurrently modify auth data (e.g., addUser, deleteUser etc.).
// In this case, there is no need to refresh the token; instead the
// clients just need to retry the operations (e.g. Put, Delete etc).
return nil
}
// clear auth token before refreshing it.
c.authTokenBundle.UpdateAuthToken("")
return c.getToken(ctx)
}
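Taken together, the two helpers above implement a small decision table; the following is a hypothetical condensed restatement for illustration only, not the shipped code.

// Hypothetical restatement of shouldRefreshToken's decision table:
//   ErrUserEmpty            -> refresh iff username/password were set
//                              (authTokenBundle != nil)
//   ErrInvalidAuthToken or
//   ErrAuthOldRevision      -> refresh iff the call allows retryAuth
//   anything else           -> never refresh
func wouldRefreshToken(c *Client, err error, callOpts *options) bool {
    switch rpctypes.Error(err) {
    case rpctypes.ErrUserEmpty:
        return c.authTokenBundle != nil
    case rpctypes.ErrInvalidAuthToken, rpctypes.ErrAuthOldRevision:
        return callOpts.retryAuth
    default:
        return false
    }
}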
// type serverStreamingRetryingStream is the implementation of grpc.ClientStream that acts as a
// proxy to the underlying call. If any of the RecvMsg() calls fail, it will try to reestablish
// a new ClientStream according to the retry policy.
@ -270,8 +239,8 @@ func (s *serverStreamingRetryingStream) receiveMsgAndIndicateRetry(m interface{}
// it's the callCtx deadline or cancellation, in which case try again.
return true, err
}
if s.client.shouldRefreshToken(err, s.callOpts) {
gterr := s.client.refreshToken(s.ctx)
if s.callOpts.retryAuth && rpctypes.Error(err) == rpctypes.ErrInvalidAuthToken {
gterr := s.client.getToken(s.ctx)
if gterr != nil {
s.client.lg.Warn("retry failed to fetch new auth token", zap.Error(gterr))
return false, err // return the original error for simplicity

View File

@ -1,141 +0,0 @@
// Copyright 2022 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Based on github.com/grpc-ecosystem/go-grpc-middleware/retry, but modified to support the more
// fine-grained error checking required by write-at-most-once retry semantics of etcd.
package clientv3
import (
"go.etcd.io/etcd/clientv3/credentials"
"go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
grpccredentials "google.golang.org/grpc/credentials"
"testing"
)
type dummyAuthTokenBundle struct{}
func (d dummyAuthTokenBundle) TransportCredentials() grpccredentials.TransportCredentials {
return nil
}
func (d dummyAuthTokenBundle) PerRPCCredentials() grpccredentials.PerRPCCredentials {
return nil
}
func (d dummyAuthTokenBundle) NewWithMode(mode string) (grpccredentials.Bundle, error) {
return nil, nil
}
func (d dummyAuthTokenBundle) UpdateAuthToken(token string) {
}
func TestClientShouldRefreshToken(t *testing.T) {
type fields struct {
authTokenBundle credentials.Bundle
}
type args struct {
err error
callOpts *options
}
optsWithTrue := &options{
retryAuth: true,
}
optsWithFalse := &options{
retryAuth: false,
}
tests := []struct {
name string
fields fields
args args
want bool
}{
{
name: "ErrUserEmpty and non nil authTokenBundle",
fields: fields{
authTokenBundle: &dummyAuthTokenBundle{},
},
args: args{rpctypes.ErrGRPCUserEmpty, optsWithTrue},
want: true,
},
{
name: "ErrUserEmpty and nil authTokenBundle",
fields: fields{
authTokenBundle: nil,
},
args: args{rpctypes.ErrGRPCUserEmpty, optsWithTrue},
want: false,
},
{
name: "ErrGRPCInvalidAuthToken and retryAuth",
fields: fields{
authTokenBundle: nil,
},
args: args{rpctypes.ErrGRPCInvalidAuthToken, optsWithTrue},
want: true,
},
{
name: "ErrGRPCInvalidAuthToken and !retryAuth",
fields: fields{
authTokenBundle: nil,
},
args: args{rpctypes.ErrGRPCInvalidAuthToken, optsWithFalse},
want: false,
},
{
name: "ErrGRPCAuthOldRevision and retryAuth",
fields: fields{
authTokenBundle: nil,
},
args: args{rpctypes.ErrGRPCAuthOldRevision, optsWithTrue},
want: true,
},
{
name: "ErrGRPCAuthOldRevision and !retryAuth",
fields: fields{
authTokenBundle: nil,
},
args: args{rpctypes.ErrGRPCAuthOldRevision, optsWithFalse},
want: false,
},
{
name: "Other error and retryAuth",
fields: fields{
authTokenBundle: nil,
},
args: args{rpctypes.ErrGRPCAuthFailed, optsWithTrue},
want: false,
},
{
name: "Other error and !retryAuth",
fields: fields{
authTokenBundle: nil,
},
args: args{rpctypes.ErrGRPCAuthFailed, optsWithFalse},
want: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := &Client{
authTokenBundle: tt.fields.authTokenBundle,
}
if got := c.shouldRefreshToken(tt.args.err, tt.args.callOpts); got != tt.want {
t.Errorf("shouldRefreshToken() = %v, want %v", got, tt.want)
}
})
}
}

View File

@ -69,8 +69,8 @@ func TestSnapshotV3RestoreMultiMemberAdd(t *testing.T) {
cfg.Name = "3"
cfg.InitialClusterToken = testClusterTkn
cfg.ClusterState = "existing"
cfg.ListenClientUrls, cfg.AdvertiseClientUrls = newCURLs, newCURLs
cfg.ListenPeerUrls, cfg.AdvertisePeerUrls = newPURLs, newPURLs
cfg.LCUrls, cfg.ACUrls = newCURLs, newCURLs
cfg.LPUrls, cfg.APUrls = newPURLs, newPURLs
cfg.InitialCluster = ""
for i := 0; i < clusterN; i++ {
cfg.InitialCluster += fmt.Sprintf(",%d=%s", i, pURLs[i].String())

View File

@ -391,7 +391,7 @@ func (s *v3Manager) saveDB() error {
be := backend.NewDefaultBackend(dbpath)
// a lessor that never times out leases
lessor := lease.NewLessor(s.lg, be, nil, lease.LessorConfig{MinLeaseTTL: math.MaxInt64})
lessor := lease.NewLessor(s.lg, be, lease.LessorConfig{MinLeaseTTL: math.MaxInt64})
mvs := mvcc.NewStore(s.lg, be, lessor, (*initIndex)(&commit), mvcc.StoreConfig{CompactionBatchLimit: math.MaxInt32})
txn := mvs.Write(traceutil.TODO())

View File

@ -51,8 +51,8 @@ func TestSnapshotV3RestoreSingle(t *testing.T) {
cfg.Name = "s1"
cfg.InitialClusterToken = testClusterTkn
cfg.ClusterState = "existing"
cfg.ListenClientUrls, cfg.AdvertiseClientUrls = cURLs, cURLs
cfg.ListenPeerUrls, cfg.AdvertisePeerUrls = pURLs, pURLs
cfg.LCUrls, cfg.ACUrls = cURLs, cURLs
cfg.LPUrls, cfg.APUrls = pURLs, pURLs
cfg.InitialCluster = fmt.Sprintf("%s=%s", cfg.Name, pURLs[0].String())
cfg.Dir = filepath.Join(os.TempDir(), fmt.Sprint(time.Now().Nanosecond()))
@ -87,8 +87,7 @@ func TestSnapshotV3RestoreSingle(t *testing.T) {
}
var cli *clientv3.Client
cli, err = clientv3.New(clientv3.Config{Endpoints: []string{cfg.AdvertiseClientUrls[0].String()}})
cli, err = clientv3.New(clientv3.Config{Endpoints: []string{cfg.ACUrls[0].String()}})
if err != nil {
t.Fatal(err)
}
@ -204,8 +203,8 @@ func createSnapshotFile(t *testing.T, kvs []kv) string {
cfg.Debug = false
cfg.Name = "default"
cfg.ClusterState = "new"
cfg.ListenClientUrls, cfg.AdvertiseClientUrls = cURLs, cURLs
cfg.ListenPeerUrls, cfg.AdvertisePeerUrls = pURLs, pURLs
cfg.LCUrls, cfg.ACUrls = cURLs, cURLs
cfg.LPUrls, cfg.APUrls = pURLs, pURLs
cfg.InitialCluster = fmt.Sprintf("%s=%s", cfg.Name, pURLs[0].String())
cfg.Dir = filepath.Join(os.TempDir(), fmt.Sprint(time.Now().Nanosecond()))
srv, err := embed.StartEtcd(cfg)
@ -222,7 +221,7 @@ func createSnapshotFile(t *testing.T, kvs []kv) string {
t.Fatalf("failed to start embed.Etcd for creating snapshots")
}
ccfg := clientv3.Config{Endpoints: []string{cfg.AdvertiseClientUrls[0].String()}}
ccfg := clientv3.Config{Endpoints: []string{cfg.ACUrls[0].String()}}
cli, err := clientv3.New(ccfg)
if err != nil {
t.Fatal(err)
@ -272,8 +271,8 @@ func restoreCluster(t *testing.T, clusterN int, dbPath string) (
cfg.Name = fmt.Sprintf("%d", i)
cfg.InitialClusterToken = testClusterTkn
cfg.ClusterState = "existing"
cfg.ListenClientUrls, cfg.AdvertiseClientUrls = []url.URL{cURLs[i]}, []url.URL{cURLs[i]}
cfg.ListenPeerUrls, cfg.AdvertisePeerUrls = []url.URL{pURLs[i]}, []url.URL{pURLs[i]}
cfg.LCUrls, cfg.ACUrls = []url.URL{cURLs[i]}, []url.URL{cURLs[i]}
cfg.LPUrls, cfg.APUrls = []url.URL{pURLs[i]}, []url.URL{pURLs[i]}
cfg.InitialCluster = ics
cfg.Dir = filepath.Join(os.TempDir(), fmt.Sprint(time.Now().Nanosecond()+i))

View File

@ -33,6 +33,7 @@ import (
// ).Else(
// OpPut(k4,v4), OpPut(k5,v5)
// ).Commit()
//
type Txn interface {
// If takes a list of comparison. If all comparisons passed in succeed,
// the operations passed into Then() will be executed. Or the operations

View File

@ -16,6 +16,9 @@ package clientv3
import (
"math/rand"
"reflect"
"runtime"
"strings"
"time"
)
@ -29,3 +32,18 @@ func jitterUp(duration time.Duration, jitter float64) time.Duration {
multiplier := jitter * (rand.Float64()*2 - 1)
return time.Duration(float64(duration) * (1 + multiplier))
}
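A worked example of the formula above: with jitter = 0.10 the multiplier is drawn from [-0.10, 0.10], so a 100ms input comes back in [90ms, 110ms]. A minimal sketch (assumes "fmt" and "time"):

// jitterUp(100*time.Millisecond, 0.10) returns a duration in
// [90ms, 110ms]; e.g. a drawn multiplier of 0.042 yields 104.2ms.
d := jitterUp(100*time.Millisecond, 0.10)
fmt.Println(d)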
// isOpFuncCalled checks whether the named option function appears in the provided op options.
func isOpFuncCalled(op string, opts []OpOption) bool {
for _, opt := range opts {
v := reflect.ValueOf(opt)
if v.Kind() == reflect.Func {
if opFunc := runtime.FuncForPC(v.Pointer()); opFunc != nil {
if strings.Contains(opFunc.Name(), op) {
return true
}
}
}
}
return false
}
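A hedged usage sketch of the reflection check above; the option names are examples of what the substring match resolves against.

// runtime.FuncForPC names an option closure something like
// "go.etcd.io/etcd/clientv3.WithPrefix.func1", so the substring
// match on "WithPrefix" succeeds.
opts := []OpOption{WithPrefix(), WithLimit(10)}
fmt.Println(isOpFuncCalled("WithPrefix", opts)) // true
fmt.Println(isOpFuncCalled("WithRev", opts))    // false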

View File

@ -37,13 +37,6 @@ const (
EventTypePut = mvccpb.PUT
closeSendErrTimeout = 250 * time.Millisecond
// AutoWatchID is the watcher ID passed in WatchStream.Watch when no
// user-provided ID is available. If passed, an ID will automatically be assigned.
AutoWatchID = 0
// InvalidWatchID represents an invalid watch ID and prevents duplication with an existing watch.
InvalidWatchID = -1
)
type Event mvccpb.Event
@ -450,7 +443,7 @@ func (w *watcher) closeStream(wgs *watchGrpcStream) {
func (w *watchGrpcStream) addSubstream(resp *pb.WatchResponse, ws *watcherStream) {
// check watch ID for backward compatibility (<= v3.3)
if resp.WatchId == InvalidWatchID || (resp.Canceled && resp.CancelReason != "") {
if resp.WatchId == -1 || (resp.Canceled && resp.CancelReason != "") {
w.closeErr = v3rpc.Error(errors.New(resp.CancelReason))
// failed; no channel
close(ws.recvc)
@ -481,7 +474,7 @@ func (w *watchGrpcStream) closeSubstream(ws *watcherStream) {
} else if ws.outc != nil {
close(ws.outc)
}
if ws.id != InvalidWatchID {
if ws.id != -1 {
delete(w.substreams, ws.id)
return
}
@ -533,7 +526,6 @@ func (w *watchGrpcStream) run() {
cancelSet := make(map[int64]struct{})
var cur *pb.WatchResponse
backoff := time.Millisecond
for {
select {
// Watch() requested
@ -544,7 +536,7 @@ func (w *watchGrpcStream) run() {
// TODO: pass custom watch ID?
ws := &watcherStream{
initReq: *wreq,
id: InvalidWatchID,
id: -1,
outc: outc,
// unbuffered so resumes won't cause repeat events
recvc: make(chan *WatchResponse),
@ -658,7 +650,6 @@ func (w *watchGrpcStream) run() {
closeErr = err
return
}
backoff = w.backoffIfUnavailable(backoff, err)
if wc, closeErr = w.newWatchClient(); closeErr != nil {
return
}
@ -675,7 +666,7 @@ func (w *watchGrpcStream) run() {
return
case ws := <-w.closingc:
if ws.id != InvalidWatchID {
if ws.id != -1 {
// client is closing an established watch; close it on the server proactively instead of waiting
// to close when the next message arrives
cancelSet[ws.id] = struct{}{}
@ -732,9 +723,9 @@ func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool {
cancelReason: pbresp.CancelReason,
}
// watch IDs are zero indexed, so request notify watch responses are assigned a watch ID of InvalidWatchID to
// watch IDs are zero indexed, so request notify watch responses are assigned a watch ID of -1 to
// indicate they should be broadcast.
if wr.IsProgressNotify() && pbresp.WatchId == InvalidWatchID {
if wr.IsProgressNotify() && pbresp.WatchId == -1 {
return w.broadcastResponse(wr)
}
@ -855,7 +846,7 @@ func (w *watchGrpcStream) serveSubstream(ws *watcherStream, resumec chan struct{
}
} else {
// current progress of watch; <= store revision
nextRev = wr.Header.Revision + 1
nextRev = wr.Header.Revision
}
if len(wr.Events) > 0 {
@ -889,7 +880,7 @@ func (w *watchGrpcStream) newWatchClient() (pb.Watch_WatchClient, error) {
w.resumec = make(chan struct{})
w.joinSubstreams()
for _, ws := range w.substreams {
ws.id = InvalidWatchID
ws.id = -1
w.resuming = append(w.resuming, ws)
}
// strip out nils, if any
@ -979,21 +970,6 @@ func (w *watchGrpcStream) joinSubstreams() {
var maxBackoff = 100 * time.Millisecond
func (w *watchGrpcStream) backoffIfUnavailable(backoff time.Duration, err error) time.Duration {
if isUnavailableErr(w.ctx, err) {
// retry, but backoff
if backoff < maxBackoff {
// 25% backoff factor
backoff = backoff + backoff/4
if backoff > maxBackoff {
backoff = maxBackoff
}
}
time.Sleep(backoff)
}
return backoff
}
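To make the growth rate concrete: starting at 1ms, the 25% factor produces roughly 1ms, 1.25ms, 1.56ms, ..., reaching the 100ms cap after about twenty unavailable errors. A minimal sketch of the same arithmetic (assumes "fmt" and "time"):

// Reproduces the capped 25% growth used above, starting from 1ms.
backoff := time.Millisecond
for backoff < maxBackoff {
    fmt.Println(backoff) // 1ms, 1.25ms, 1.5625ms, ...
    backoff += backoff / 4
    if backoff > maxBackoff {
        backoff = maxBackoff // clamp at 100ms
    }
}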
// openWatchClient retries opening a watch client until success or halt.
// manually retry in case "ws==nil && err==nil"
// TODO: remove FailFast=false
@ -1014,7 +990,17 @@ func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error)
if isHaltErr(w.ctx, err) {
return nil, v3rpc.Error(err)
}
backoff = w.backoffIfUnavailable(backoff, err)
if isUnavailableErr(w.ctx, err) {
// retry, but backoff
if backoff < maxBackoff {
// 25% backoff factor
backoff = backoff + backoff/4
if backoff > maxBackoff {
backoff = maxBackoff
}
}
time.Sleep(backoff)
}
}
return ws, nil
}

View File

@ -18,7 +18,6 @@ import (
"crypto/tls"
"fmt"
"io/ioutil"
"math"
"net"
"net/http"
"net/url"
@ -54,9 +53,7 @@ const (
DefaultMaxSnapshots = 5
DefaultMaxWALs = 5
DefaultMaxTxnOps = uint(128)
DefaultWarningApplyDuration = 100 * time.Millisecond
DefaultMaxRequestBytes = 1.5 * 1024 * 1024
DefaultMaxConcurrentStreams = math.MaxUint32
DefaultGRPCKeepAliveMinTime = 5 * time.Second
DefaultGRPCKeepAliveInterval = 2 * time.Hour
DefaultGRPCKeepAliveTimeout = 20 * time.Second
@ -179,12 +176,8 @@ type Config struct {
MaxTxnOps uint `json:"max-txn-ops"`
MaxRequestBytes uint `json:"max-request-bytes"`
// MaxConcurrentStreams specifies the maximum number of concurrent
// streams that each client can open at a time.
MaxConcurrentStreams uint32 `json:"max-concurrent-streams"`
ListenPeerUrls, ListenClientUrls, ListenClientHttpUrls []url.URL
AdvertisePeerUrls, AdvertiseClientUrls []url.URL
LPUrls, LCUrls []url.URL
APUrls, ACUrls []url.URL
ClientTLSInfo transport.TLSInfo
ClientAutoTLS bool
PeerTLSInfo transport.TLSInfo
@ -195,11 +188,6 @@ type Config struct {
// Note that cipher suites are prioritized in the given order.
CipherSuites []string `json:"cipher-suites"`
// TlsMinVersion is the minimum accepted TLS version between client/server and peers.
TlsMinVersion string `json:"tls-min-version"`
// TlsMaxVersion is the maximum accepted TLS version between client/server and peers.
TlsMaxVersion string `json:"tls-max-version"`
ClusterState string `json:"initial-cluster-state"`
DNSCluster string `json:"discovery-srv"`
DNSClusterServiceName string `json:"discovery-srv-name"`
@ -285,7 +273,7 @@ type Config struct {
AuthToken string `json:"auth-token"`
BcryptCost uint `json:"bcrypt-cost"`
// AuthTokenTTL specifies the TTL in seconds of the simple token
// The AuthTokenTTL in seconds of the simple token
AuthTokenTTL uint `json:"auth-token-ttl"`
ExperimentalInitialCorruptCheck bool `json:"experimental-initial-corrupt-check"`
@ -293,18 +281,10 @@ type Config struct {
ExperimentalEnableV2V3 string `json:"experimental-enable-v2v3"`
// ExperimentalBackendFreelistType specifies the type of freelist that boltdb backend uses (array and map are supported types).
ExperimentalBackendFreelistType string `json:"experimental-backend-bbolt-freelist-type"`
// ExperimentalEnableLeaseCheckpoint enables leader to send regular checkpoints to other members to prevent reset of remaining TTL on leader change.
// ExperimentalEnableLeaseCheckpoint enables primary lessor to persist lease remainingTTL to prevent indefinite auto-renewal of long lived leases.
ExperimentalEnableLeaseCheckpoint bool `json:"experimental-enable-lease-checkpoint"`
// ExperimentalEnableLeaseCheckpointPersist enables persisting remainingTTL to prevent indefinite auto-renewal of long lived leases. Always enabled in v3.6. Should be used to ensure smooth upgrade from v3.5 clusters with this feature enabled.
// Requires experimental-enable-lease-checkpoint to be enabled.
// Deprecated in v3.6.
// TODO: Delete in v3.7
ExperimentalEnableLeaseCheckpointPersist bool `json:"experimental-enable-lease-checkpoint-persist"`
ExperimentalCompactionBatchLimit int `json:"experimental-compaction-batch-limit"`
ExperimentalWatchProgressNotifyInterval time.Duration `json:"experimental-watch-progress-notify-interval"`
// ExperimentalWarningApplyDuration is the time duration after which a warning is generated if applying a request
// takes more time than this value.
ExperimentalWarningApplyDuration time.Duration `json:"experimental-warning-apply-duration"`
// ForceNewCluster starts a new cluster even if previously started; unsafe.
ForceNewCluster bool `json:"force-new-cluster"`
@ -373,11 +353,10 @@ type configYAML struct {
// configJSON has file options that are translated into Config options
type configJSON struct {
ListenPeerUrls string `json:"listen-peer-urls"`
ListenClientUrls string `json:"listen-client-urls"`
ListenClientHttpUrls string `json:"listen-client-http-urls"`
AdvertisePeerUrls string `json:"initial-advertise-peer-urls"`
AdvertiseClientUrls string `json:"advertise-client-urls"`
LPUrlsJSON string `json:"listen-peer-urls"`
LCUrlsJSON string `json:"listen-client-urls"`
APUrlsJSON string `json:"initial-advertise-peer-urls"`
ACUrlsJSON string `json:"advertise-client-urls"`
CORSJSON string `json:"cors"`
HostWhitelistJSON string `json:"host-whitelist"`
@ -411,8 +390,6 @@ func NewConfig() *Config {
MaxTxnOps: DefaultMaxTxnOps,
MaxRequestBytes: DefaultMaxRequestBytes,
MaxConcurrentStreams: DefaultMaxConcurrentStreams,
ExperimentalWarningApplyDuration: DefaultWarningApplyDuration,
GRPCKeepAliveMinTime: DefaultGRPCKeepAliveMinTime,
GRPCKeepAliveInterval: DefaultGRPCKeepAliveInterval,
@ -422,10 +399,10 @@ func NewConfig() *Config {
ElectionMs: 1000,
InitialElectionTickAdvance: true,
ListenPeerUrls: []url.URL{*lpurl},
ListenClientUrls: []url.URL{*lcurl},
AdvertisePeerUrls: []url.URL{*apurl},
AdvertiseClientUrls: []url.URL{*acurl},
LPUrls: []url.URL{*lpurl},
LCUrls: []url.URL{*lcurl},
APUrls: []url.URL{*apurl},
ACUrls: []url.URL{*acurl},
ClusterState: ClusterStateFlagNew,
InitialClusterToken: "etcd-cluster",
@ -490,49 +467,40 @@ func (cfg *configYAML) configFromFile(path string) error {
return err
}
if cfg.configJSON.ListenPeerUrls != "" {
u, err := types.NewURLs(strings.Split(cfg.configJSON.ListenPeerUrls, ","))
if cfg.LPUrlsJSON != "" {
u, err := types.NewURLs(strings.Split(cfg.LPUrlsJSON, ","))
if err != nil {
fmt.Fprintf(os.Stderr, "unexpected error setting up listen-peer-urls: %v\n", err)
os.Exit(1)
}
cfg.Config.ListenPeerUrls = u
cfg.LPUrls = []url.URL(u)
}
if cfg.configJSON.ListenClientUrls != "" {
u, err := types.NewURLs(strings.Split(cfg.configJSON.ListenClientUrls, ","))
if cfg.LCUrlsJSON != "" {
u, err := types.NewURLs(strings.Split(cfg.LCUrlsJSON, ","))
if err != nil {
fmt.Fprintf(os.Stderr, "unexpected error setting up listen-client-urls: %v\n", err)
os.Exit(1)
}
cfg.Config.ListenClientUrls = u
cfg.LCUrls = []url.URL(u)
}
if cfg.configJSON.ListenClientHttpUrls != "" {
u, err := types.NewURLs(strings.Split(cfg.configJSON.ListenClientHttpUrls, ","))
if err != nil {
fmt.Fprintf(os.Stderr, "unexpected error setting up listen-client-http-urls: %v\n", err)
os.Exit(1)
}
cfg.Config.ListenClientHttpUrls = u
}
if cfg.configJSON.AdvertisePeerUrls != "" {
u, err := types.NewURLs(strings.Split(cfg.configJSON.AdvertisePeerUrls, ","))
if cfg.APUrlsJSON != "" {
u, err := types.NewURLs(strings.Split(cfg.APUrlsJSON, ","))
if err != nil {
fmt.Fprintf(os.Stderr, "unexpected error setting up initial-advertise-peer-urls: %v\n", err)
os.Exit(1)
}
cfg.Config.AdvertisePeerUrls = u
cfg.APUrls = []url.URL(u)
}
if cfg.configJSON.AdvertiseClientUrls != "" {
u, err := types.NewURLs(strings.Split(cfg.configJSON.AdvertiseClientUrls, ","))
if cfg.ACUrlsJSON != "" {
u, err := types.NewURLs(strings.Split(cfg.ACUrlsJSON, ","))
if err != nil {
fmt.Fprintf(os.Stderr, "unexpected error setting up advertise-peer-urls: %v\n", err)
os.Exit(1)
}
cfg.Config.AdvertiseClientUrls = u
cfg.ACUrls = []url.URL(u)
}
if cfg.ListenMetricsUrlsJSON != "" {
@ -581,56 +549,39 @@ func updateCipherSuites(tls *transport.TLSInfo, ss []string) error {
return fmt.Errorf("TLSInfo.CipherSuites is already specified (given %v)", ss)
}
if len(ss) > 0 {
cs, err := tlsutil.GetCipherSuites(ss)
if err != nil {
return err
cs := make([]uint16, len(ss))
for i, s := range ss {
var ok bool
cs[i], ok = tlsutil.GetCipherSuite(s)
if !ok {
return fmt.Errorf("unexpected TLS cipher suite %q", s)
}
}
tls.CipherSuites = cs
}
return nil
}
func updateMinMaxVersions(info *transport.TLSInfo, min, max string) {
// Validate() has been called to check the user input, so it should never fail.
var err error
if info.MinVersion, err = tlsutil.GetTLSVersion(min); err != nil {
panic(err)
}
if info.MaxVersion, err = tlsutil.GetTLSVersion(max); err != nil {
panic(err)
}
}
// Validate ensures that '*embed.Config' fields are properly configured.
func (cfg *Config) Validate() error {
if err := cfg.setupLogging(); err != nil {
return err
}
if err := checkBindURLs(cfg.ListenPeerUrls); err != nil {
if err := checkBindURLs(cfg.LPUrls); err != nil {
return err
}
if err := checkBindURLs(cfg.ListenClientUrls); err != nil {
if err := checkBindURLs(cfg.LCUrls); err != nil {
return err
}
if err := checkBindURLs(cfg.ListenClientHttpUrls); err != nil {
return err
}
if len(cfg.ListenClientHttpUrls) == 0 {
if cfg.logger != nil {
cfg.logger.Warn("Running http and grpc server on single port. This is not recommended for production.")
} else {
plog.Warning("Running http and grpc server on single port. This is not recommended for production.")
}
}
if err := checkBindURLs(cfg.ListenMetricsUrls); err != nil {
return err
}
if err := checkHostURLs(cfg.AdvertisePeerUrls); err != nil {
addrs := cfg.getAdvertisePeerUrls()
if err := checkHostURLs(cfg.APUrls); err != nil {
addrs := cfg.getAPURLs()
return fmt.Errorf(`--initial-advertise-peer-urls %q must be "host:port" (%v)`, strings.Join(addrs, ","), err)
}
if err := checkHostURLs(cfg.AdvertiseClientUrls); err != nil {
addrs := cfg.getAdvertiseClientUrls()
if err := checkHostURLs(cfg.ACUrls); err != nil {
addrs := cfg.getACURLs()
return fmt.Errorf(`--advertise-client-urls %q must be "host:port" (%v)`, strings.Join(addrs, ","), err)
}
// Check if conflicting flags are passed.
@ -663,7 +614,7 @@ func (cfg *Config) Validate() error {
}
// check this last since proxying in etcdmain may make this OK
if cfg.ListenClientUrls != nil && cfg.AdvertiseClientUrls == nil {
if cfg.LCUrls != nil && cfg.ACUrls == nil {
return ErrUnsetAdvertiseClientURLsFlag
}
@ -674,33 +625,6 @@ func (cfg *Config) Validate() error {
return fmt.Errorf("unknown auto-compaction-mode %q", cfg.AutoCompactionMode)
}
if !cfg.ExperimentalEnableLeaseCheckpointPersist && cfg.ExperimentalEnableLeaseCheckpoint {
cfg.logger.Warn("Detected that checkpointing is enabled without persistence. Consider enabling experimental-enable-lease-checkpoint-persist")
}
if cfg.ExperimentalEnableLeaseCheckpointPersist && !cfg.ExperimentalEnableLeaseCheckpoint {
return fmt.Errorf("setting experimental-enable-lease-checkpoint-persist requires experimental-enable-lease-checkpoint")
}
minVersion, err := tlsutil.GetTLSVersion(cfg.TlsMinVersion)
if err != nil {
return err
}
maxVersion, err := tlsutil.GetTLSVersion(cfg.TlsMaxVersion)
if err != nil {
return err
}
// maxVersion == 0 means that Go selects the highest available version.
if maxVersion != 0 && minVersion > maxVersion {
return fmt.Errorf("min version (%s) is greater than max version (%s)", cfg.TlsMinVersion, cfg.TlsMaxVersion)
}
// Check if user attempted to configure ciphers for TLS1.3 only: Go does not support that currently.
if minVersion == tls.VersionTLS13 && len(cfg.CipherSuites) > 0 {
return fmt.Errorf("cipher suites cannot be configured when only TLS1.3 is enabled")
}
return nil
}
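A hedged sketch of exercising the validation above through the embed API; the version strings are illustrative.

// Sketch (assumes "log" and "go.etcd.io/etcd/embed").
cfg := embed.NewConfig()
cfg.TlsMinVersion = "TLS1.2"
cfg.TlsMaxVersion = "TLS1.3"
if err := cfg.Validate(); err != nil {
    // e.g. min greater than max, or cipher suites with a TLS1.3-only setup
    log.Fatal(err)
}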
@ -712,7 +636,7 @@ func (cfg *Config) PeerURLsMapAndToken(which string) (urlsmap types.URLsMap, tok
urlsmap = types.URLsMap{}
// If using discovery, generate a temporary cluster based on
// self's advertised peer URLs
urlsmap[cfg.Name] = cfg.AdvertisePeerUrls
urlsmap[cfg.Name] = cfg.APUrls
token = cfg.Durl
case cfg.DNSCluster != "":
@ -768,7 +692,7 @@ func (cfg *Config) GetDNSClusterNames() ([]string, error) {
// Use both etcd-server-ssl and etcd-server for discovery.
// Combine the results if both are available.
clusterStrs, cerr = srv.GetCluster("https", "etcd-server-ssl"+serviceNameSuffix, cfg.Name, cfg.DNSCluster, cfg.AdvertisePeerUrls)
clusterStrs, cerr = srv.GetCluster("https", "etcd-server-ssl"+serviceNameSuffix, cfg.Name, cfg.DNSCluster, cfg.APUrls)
if cerr != nil {
clusterStrs = make([]string, 0)
}
@ -779,13 +703,13 @@ func (cfg *Config) GetDNSClusterNames() ([]string, error) {
zap.String("service-name", "etcd-server-ssl"+serviceNameSuffix),
zap.String("server-name", cfg.Name),
zap.String("discovery-srv", cfg.DNSCluster),
zap.Strings("advertise-peer-urls", cfg.getAdvertisePeerUrls()),
zap.Strings("advertise-peer-urls", cfg.getAPURLs()),
zap.Strings("found-cluster", clusterStrs),
zap.Error(cerr),
)
}
defaultHTTPClusterStrs, httpCerr := srv.GetCluster("http", "etcd-server"+serviceNameSuffix, cfg.Name, cfg.DNSCluster, cfg.AdvertisePeerUrls)
defaultHTTPClusterStrs, httpCerr := srv.GetCluster("http", "etcd-server"+serviceNameSuffix, cfg.Name, cfg.DNSCluster, cfg.APUrls)
if httpCerr != nil {
clusterStrs = append(clusterStrs, defaultHTTPClusterStrs...)
}
@ -796,7 +720,7 @@ func (cfg *Config) GetDNSClusterNames() ([]string, error) {
zap.String("service-name", "etcd-server"+serviceNameSuffix),
zap.String("server-name", cfg.Name),
zap.String("discovery-srv", cfg.DNSCluster),
zap.Strings("advertise-peer-urls", cfg.getAdvertisePeerUrls()),
zap.Strings("advertise-peer-urls", cfg.getAPURLs()),
zap.Strings("found-cluster", clusterStrs),
zap.Error(httpCerr),
)
@ -806,15 +730,15 @@ func (cfg *Config) GetDNSClusterNames() ([]string, error) {
}
func (cfg Config) InitialClusterFromName(name string) (ret string) {
if len(cfg.AdvertisePeerUrls) == 0 {
if len(cfg.APUrls) == 0 {
return ""
}
n := name
if name == "" {
n = DefaultName
}
for i := range cfg.AdvertisePeerUrls {
ret = ret + "," + n + "=" + cfg.AdvertisePeerUrls[i].String()
for i := range cfg.APUrls {
ret = ret + "," + n + "=" + cfg.APUrls[i].String()
}
return ret[1:]
}
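A worked example of the string assembled above; the name and URLs are hypothetical.

// With cfg.Name = "infra0" and advertised peer URLs
// http://10.0.1.10:2380 and http://10.0.1.11:2380, this prints
// "infra0=http://10.0.1.10:2380,infra0=http://10.0.1.11:2380".
fmt.Println(cfg.InitialClusterFromName(cfg.Name))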
@ -823,11 +747,11 @@ func (cfg Config) IsNewCluster() bool { return cfg.ClusterState == ClusterStateF
func (cfg Config) ElectionTicks() int { return int(cfg.ElectionMs / cfg.TickMs) }
func (cfg Config) defaultPeerHost() bool {
return len(cfg.AdvertisePeerUrls) == 1 && cfg.AdvertisePeerUrls[0].String() == DefaultInitialAdvertisePeerURLs
return len(cfg.APUrls) == 1 && cfg.APUrls[0].String() == DefaultInitialAdvertisePeerURLs
}
func (cfg Config) defaultClientHost() bool {
return len(cfg.AdvertiseClientUrls) == 1 && cfg.AdvertiseClientUrls[0].String() == DefaultAdvertiseClientURLs
return len(cfg.ACUrls) == 1 && cfg.ACUrls[0].String() == DefaultAdvertiseClientURLs
}
func (cfg *Config) ClientSelfCert() (err error) {
@ -842,12 +766,9 @@ func (cfg *Config) ClientSelfCert() (err error) {
}
return nil
}
chosts := make([]string, 0, len(cfg.ListenClientUrls)+len(cfg.ListenClientHttpUrls))
for _, u := range cfg.ListenClientUrls {
chosts = append(chosts, u.Host)
}
for _, u := range cfg.ListenClientHttpUrls {
chosts = append(chosts, u.Host)
chosts := make([]string, len(cfg.LCUrls))
for i, u := range cfg.LCUrls {
chosts[i] = u.Host
}
cfg.ClientTLSInfo, err = transport.SelfCert(cfg.logger, filepath.Join(cfg.Dir, "fixtures", "client"), chosts)
if err != nil {
@ -868,8 +789,8 @@ func (cfg *Config) PeerSelfCert() (err error) {
}
return nil
}
phosts := make([]string, len(cfg.ListenPeerUrls))
for i, u := range cfg.ListenPeerUrls {
phosts := make([]string, len(cfg.LPUrls))
for i, u := range cfg.LPUrls {
phosts[i] = u.Host
}
cfg.PeerTLSInfo, err = transport.SelfCert(cfg.logger, filepath.Join(cfg.Dir, "fixtures", "peer"), phosts)
@ -897,9 +818,9 @@ func (cfg *Config) UpdateDefaultClusterFromName(defaultInitialCluster string) (s
}
used := false
pip, pport := cfg.ListenPeerUrls[0].Hostname(), cfg.ListenPeerUrls[0].Port()
pip, pport := cfg.LPUrls[0].Hostname(), cfg.LPUrls[0].Port()
if cfg.defaultPeerHost() && pip == "0.0.0.0" {
cfg.AdvertisePeerUrls[0] = url.URL{Scheme: cfg.AdvertisePeerUrls[0].Scheme, Host: fmt.Sprintf("%s:%s", defaultHostname, pport)}
cfg.APUrls[0] = url.URL{Scheme: cfg.APUrls[0].Scheme, Host: fmt.Sprintf("%s:%s", defaultHostname, pport)}
used = true
}
// update 'initial-cluster' when only the name is specified (e.g. 'etcd --name=abc')
@ -907,9 +828,9 @@ func (cfg *Config) UpdateDefaultClusterFromName(defaultInitialCluster string) (s
cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name)
}
cip, cport := cfg.ListenClientUrls[0].Hostname(), cfg.ListenClientUrls[0].Port()
cip, cport := cfg.LCUrls[0].Hostname(), cfg.LCUrls[0].Port()
if cfg.defaultClientHost() && cip == "0.0.0.0" {
cfg.AdvertiseClientUrls[0] = url.URL{Scheme: cfg.AdvertiseClientUrls[0].Scheme, Host: fmt.Sprintf("%s:%s", defaultHostname, cport)}
cfg.ACUrls[0] = url.URL{Scheme: cfg.ACUrls[0].Scheme, Host: fmt.Sprintf("%s:%s", defaultHostname, cport)}
used = true
}
dhost := defaultHostname
@ -954,42 +875,34 @@ func checkHostURLs(urls []url.URL) error {
return nil
}
func (cfg *Config) getAdvertisePeerUrls() (ss []string) {
ss = make([]string, len(cfg.AdvertisePeerUrls))
for i := range cfg.AdvertisePeerUrls {
ss[i] = cfg.AdvertisePeerUrls[i].String()
func (cfg *Config) getAPURLs() (ss []string) {
ss = make([]string, len(cfg.APUrls))
for i := range cfg.APUrls {
ss[i] = cfg.APUrls[i].String()
}
return ss
}
func (cfg *Config) getListenPeerUrls() (ss []string) {
ss = make([]string, len(cfg.ListenPeerUrls))
for i := range cfg.ListenPeerUrls {
ss[i] = cfg.ListenPeerUrls[i].String()
func (cfg *Config) getLPURLs() (ss []string) {
ss = make([]string, len(cfg.LPUrls))
for i := range cfg.LPUrls {
ss[i] = cfg.LPUrls[i].String()
}
return ss
}
func (cfg *Config) getAdvertiseClientUrls() (ss []string) {
ss = make([]string, len(cfg.AdvertiseClientUrls))
for i := range cfg.AdvertiseClientUrls {
ss[i] = cfg.AdvertiseClientUrls[i].String()
func (cfg *Config) getACURLs() (ss []string) {
ss = make([]string, len(cfg.ACUrls))
for i := range cfg.ACUrls {
ss[i] = cfg.ACUrls[i].String()
}
return ss
}
func (cfg *Config) getListenClientUrls() (ss []string) {
ss = make([]string, len(cfg.ListenClientUrls))
for i := range cfg.ListenClientUrls {
ss[i] = cfg.ListenClientUrls[i].String()
}
return ss
}
func (cfg *Config) getListenClientHttpUrls() (ss []string) {
ss = make([]string, len(cfg.ListenClientHttpUrls))
for i := range cfg.ListenClientHttpUrls {
ss[i] = cfg.ListenClientHttpUrls[i].String()
func (cfg *Config) getLCURLs() (ss []string) {
ss = make([]string, len(cfg.LCUrls))
for i := range cfg.LCUrls {
ss[i] = cfg.LCUrls[i].String()
}
return ss
}

View File

@ -196,15 +196,11 @@ func (cfg *Config) setupLogging() error {
grpcLogOnce.Do(func() {
// debug true: enable info, warning, error
// debug false: discard info only
if cfg.LogLevel == "debug" {
var gl grpclog.LoggerV2
gl, err = logutil.NewGRPCLoggerV2(copied)
if err == nil {
grpclog.SetLoggerV2(gl)
}
} else {
grpclog.SetLoggerV2(grpclog.NewLoggerV2(ioutil.Discard, os.Stderr, os.Stderr))
}
})
return nil
}
@ -249,11 +245,7 @@ func (cfg *Config) setupLogging() error {
c.loggerWriteSyncer = syncer
grpcLogOnce.Do(func() {
if cfg.LogLevel == "debug" {
grpclog.SetLoggerV2(logutil.NewGRPCLoggerV2FromZapCore(cr, syncer))
} else {
grpclog.SetLoggerV2(grpclog.NewLoggerV2(ioutil.Discard, os.Stderr, os.Stderr))
}
})
return nil
}

View File

@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !windows
// +build !windows
package embed

View File

@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build windows
// +build windows
package embed

View File

@ -15,7 +15,6 @@
package embed
import (
"crypto/tls"
"fmt"
"io/ioutil"
"net/url"
@ -23,7 +22,6 @@ import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"go.etcd.io/etcd/pkg/transport"
"sigs.k8s.io/yaml"
@ -77,12 +75,12 @@ func TestConfigFileOtherFields(t *testing.T) {
func TestUpdateDefaultClusterFromName(t *testing.T) {
cfg := NewConfig()
defaultInitialCluster := cfg.InitialCluster
oldscheme := cfg.AdvertisePeerUrls[0].Scheme
origpeer := cfg.AdvertisePeerUrls[0].String()
origadvc := cfg.AdvertiseClientUrls[0].String()
oldscheme := cfg.APUrls[0].Scheme
origpeer := cfg.APUrls[0].String()
origadvc := cfg.ACUrls[0].String()
cfg.Name = "abc"
lpport := cfg.ListenPeerUrls[0].Port()
lpport := cfg.LPUrls[0].Port()
// in case of 'etcd --name=abc'
exp := fmt.Sprintf("%s=%s://localhost:%s", cfg.Name, oldscheme, lpport)
@ -91,12 +89,12 @@ func TestUpdateDefaultClusterFromName(t *testing.T) {
t.Fatalf("initial-cluster expected %q, got %q", exp, cfg.InitialCluster)
}
// advertise peer URL should not be affected
if origpeer != cfg.AdvertisePeerUrls[0].String() {
t.Fatalf("advertise peer url expected %q, got %q", origadvc, cfg.AdvertisePeerUrls[0].String())
if origpeer != cfg.APUrls[0].String() {
t.Fatalf("advertise peer url expected %q, got %q", origadvc, cfg.APUrls[0].String())
}
// advertise client URL should not be affected
if origadvc != cfg.AdvertiseClientUrls[0].String() {
t.Fatalf("advertise client url expected %q, got %q", origadvc, cfg.AdvertiseClientUrls[0].String())
if origadvc != cfg.ACUrls[0].String() {
t.Fatalf("advertise client url expected %q, got %q", origadvc, cfg.ACUrls[0].String())
}
}
@ -109,17 +107,17 @@ func TestUpdateDefaultClusterFromNameOverwrite(t *testing.T) {
cfg := NewConfig()
defaultInitialCluster := cfg.InitialCluster
oldscheme := cfg.AdvertisePeerUrls[0].Scheme
origadvc := cfg.AdvertiseClientUrls[0].String()
oldscheme := cfg.APUrls[0].Scheme
origadvc := cfg.ACUrls[0].String()
cfg.Name = "abc"
lpport := cfg.ListenPeerUrls[0].Port()
cfg.ListenPeerUrls[0] = url.URL{Scheme: cfg.ListenPeerUrls[0].Scheme, Host: fmt.Sprintf("0.0.0.0:%s", lpport)}
lpport := cfg.LPUrls[0].Port()
cfg.LPUrls[0] = url.URL{Scheme: cfg.LPUrls[0].Scheme, Host: fmt.Sprintf("0.0.0.0:%s", lpport)}
dhost, _ := cfg.UpdateDefaultClusterFromName(defaultInitialCluster)
if dhost != defaultHostname {
t.Fatalf("expected default host %q, got %q", defaultHostname, dhost)
}
aphost, apport := cfg.AdvertisePeerUrls[0].Hostname(), cfg.AdvertisePeerUrls[0].Port()
aphost, apport := cfg.APUrls[0].Hostname(), cfg.APUrls[0].Port()
if apport != lpport {
t.Fatalf("advertise peer url got different port %s, expected %s", apport, lpport)
}
@ -132,8 +130,8 @@ func TestUpdateDefaultClusterFromNameOverwrite(t *testing.T) {
}
// advertise client URL should not be affected
if origadvc != cfg.AdvertiseClientUrls[0].String() {
t.Fatalf("advertise-client-url expected %q, got %q", origadvc, cfg.AdvertiseClientUrls[0].String())
if origadvc != cfg.ACUrls[0].String() {
t.Fatalf("advertise-client-url expected %q, got %q", origadvc, cfg.ACUrls[0].String())
}
}
@ -204,80 +202,3 @@ func TestAutoCompactionModeParse(t *testing.T) {
}
}
}
func TestTLSVersionMinMax(t *testing.T) {
tests := []struct {
name string
givenTLSMinVersion string
givenTLSMaxVersion string
givenCipherSuites []string
expectError bool
expectedMinTLSVersion uint16
expectedMaxTLSVersion uint16
}{
{
name: "Minimum TLS version is set",
givenTLSMinVersion: "TLS1.3",
expectedMinTLSVersion: tls.VersionTLS13,
expectedMaxTLSVersion: 0,
},
{
name: "Maximum TLS version is set",
givenTLSMaxVersion: "TLS1.2",
expectedMinTLSVersion: 0,
expectedMaxTLSVersion: tls.VersionTLS12,
},
{
name: "Minimum and Maximum TLS versions are set",
givenTLSMinVersion: "TLS1.3",
givenTLSMaxVersion: "TLS1.3",
expectedMinTLSVersion: tls.VersionTLS13,
expectedMaxTLSVersion: tls.VersionTLS13,
},
{
name: "Minimum and Maximum TLS versions are set in reverse order",
givenTLSMinVersion: "TLS1.3",
givenTLSMaxVersion: "TLS1.2",
expectError: true,
},
{
name: "Invalid minimum TLS version",
givenTLSMinVersion: "invalid version",
expectError: true,
},
{
name: "Invalid maximum TLS version",
givenTLSMaxVersion: "invalid version",
expectError: true,
},
{
name: "Cipher suites configured for TLS 1.3",
givenTLSMinVersion: "TLS1.3",
givenCipherSuites: []string{"TLS_AES_128_GCM_SHA256"},
expectError: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
cfg := NewConfig()
cfg.TlsMinVersion = tt.givenTLSMinVersion
cfg.TlsMaxVersion = tt.givenTLSMaxVersion
cfg.CipherSuites = tt.givenCipherSuites
err := cfg.Validate()
if err != nil {
assert.True(t, tt.expectError, "Validate() returned error while expecting success: %v", err)
return
}
updateMinMaxVersions(&cfg.PeerTLSInfo, cfg.TlsMinVersion, cfg.TlsMaxVersion)
updateMinMaxVersions(&cfg.ClientTLSInfo, cfg.TlsMinVersion, cfg.TlsMaxVersion)
assert.Equal(t, tt.expectedMinTLSVersion, cfg.PeerTLSInfo.MinVersion)
assert.Equal(t, tt.expectedMaxTLSVersion, cfg.PeerTLSInfo.MaxVersion)
assert.Equal(t, tt.expectedMinTLSVersion, cfg.ClientTLSInfo.MinVersion)
assert.Equal(t, tt.expectedMaxTLSVersion, cfg.ClientTLSInfo.MaxVersion)
})
}
}

View File

@ -20,7 +20,6 @@ import (
"fmt"
"io/ioutil"
defaultLog "log"
"math"
"net"
"net/http"
"net/url"
@ -30,7 +29,6 @@ import (
"sync"
"time"
"go.etcd.io/etcd/clientv3/credentials"
"go.etcd.io/etcd/etcdserver"
"go.etcd.io/etcd/etcdserver/api/etcdhttp"
"go.etcd.io/etcd/etcdserver/api/rafthttp"
@ -118,7 +116,7 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) {
if e.cfg.logger != nil {
e.cfg.logger.Info(
"configuring peer listeners",
zap.Strings("listen-peer-urls", e.cfg.getListenPeerUrls()),
zap.Strings("listen-peer-urls", e.cfg.getLPURLs()),
)
}
if e.Peers, err = configurePeerListeners(cfg); err != nil {
@ -128,7 +126,7 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) {
if e.cfg.logger != nil {
e.cfg.logger.Info(
"configuring client listeners",
zap.Strings("listen-client-urls", e.cfg.getListenClientUrls()),
zap.Strings("listen-client-urls", e.cfg.getLCURLs()),
)
}
if e.sctxs, err = configureClientListeners(cfg); err != nil {
@ -165,8 +163,8 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) {
srvcfg := etcdserver.ServerConfig{
Name: cfg.Name,
ClientURLs: cfg.AdvertiseClientUrls,
PeerURLs: cfg.AdvertisePeerUrls,
ClientURLs: cfg.ACUrls,
PeerURLs: cfg.APUrls,
DataDir: cfg.Dir,
DedicatedWALDir: cfg.WalDir,
SnapshotCount: cfg.SnapshotCount,
@ -190,7 +188,6 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) {
BackendBatchInterval: cfg.BackendBatchInterval,
MaxTxnOps: cfg.MaxTxnOps,
MaxRequestBytes: cfg.MaxRequestBytes,
MaxConcurrentStreams: cfg.MaxConcurrentStreams,
StrictReconfigCheck: cfg.StrictReconfigCheck,
ClientCertAuthEnabled: cfg.ClientTLSInfo.ClientCertAuth,
AuthToken: cfg.AuthToken,
@ -210,10 +207,8 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) {
EnableGRPCGateway: cfg.EnableGRPCGateway,
UnsafeNoFsync: cfg.UnsafeNoFsync,
EnableLeaseCheckpoint: cfg.ExperimentalEnableLeaseCheckpoint,
LeaseCheckpointPersist: cfg.ExperimentalEnableLeaseCheckpointPersist,
CompactionBatchLimit: cfg.ExperimentalCompactionBatchLimit,
WatchProgressNotifyInterval: cfg.ExperimentalWatchProgressNotifyInterval,
WarningApplyDuration: cfg.ExperimentalWarningApplyDuration,
}
print(e.cfg.logger, *cfg, srvcfg, memberInitialized)
if e.Server, err = etcdserver.NewServer(srvcfg); err != nil {
@ -249,10 +244,10 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) {
e.cfg.logger.Info(
"now serving peer/client/metrics",
zap.String("local-member-id", e.Server.ID().String()),
zap.Strings("initial-advertise-peer-urls", e.cfg.getAdvertisePeerUrls()),
zap.Strings("listen-peer-urls", e.cfg.getListenPeerUrls()),
zap.Strings("advertise-client-urls", e.cfg.getAdvertiseClientUrls()),
zap.Strings("listen-client-urls", e.cfg.getListenClientUrls()),
zap.Strings("initial-advertise-peer-urls", e.cfg.getAPURLs()),
zap.Strings("listen-peer-urls", e.cfg.getLPURLs()),
zap.Strings("advertise-client-urls", e.cfg.getACURLs()),
zap.Strings("listen-client-urls", e.cfg.getLCURLs()),
zap.Strings("listen-metrics-urls", e.cfg.getMetricsURLs()),
)
}
@ -324,23 +319,18 @@ func print(lg *zap.Logger, ec Config, sc etcdserver.ServerConfig, memberInitiali
zap.String("election-timeout", fmt.Sprintf("%v", time.Duration(sc.ElectionTicks*int(sc.TickMs))*time.Millisecond)),
zap.Bool("initial-election-tick-advance", sc.InitialElectionTickAdvance),
zap.Uint64("snapshot-count", sc.SnapshotCount),
zap.Uint("max-wals", sc.MaxWALFiles),
zap.Uint("max-snapshots", sc.MaxSnapFiles),
zap.Uint64("snapshot-catchup-entries", sc.SnapshotCatchUpEntries),
zap.Strings("initial-advertise-peer-urls", ec.getAdvertisePeerUrls()),
zap.Strings("listen-peer-urls", ec.getListenPeerUrls()),
zap.Strings("advertise-client-urls", ec.getAdvertiseClientUrls()),
zap.Strings("listen-client-urls", ec.getListenClientUrls()),
zap.Strings("initial-advertise-peer-urls", ec.getAPURLs()),
zap.Strings("listen-peer-urls", ec.getLPURLs()),
zap.Strings("advertise-client-urls", ec.getACURLs()),
zap.Strings("listen-client-urls", ec.getLCURLs()),
zap.Strings("listen-metrics-urls", ec.getMetricsURLs()),
zap.Strings("cors", cors),
zap.Strings("host-whitelist", hss),
zap.String("initial-cluster", sc.InitialPeerURLsMap.String()),
zap.String("initial-cluster-state", ec.ClusterState),
zap.String("initial-cluster-token", sc.InitialClusterToken),
zap.Int64("quota-backend-bytes", quota),
zap.Uint("max-request-bytes", sc.MaxRequestBytes),
zap.Uint32("max-concurrent-streams", sc.MaxConcurrentStreams),
zap.Int64("quota-size-bytes", quota),
zap.Bool("pre-vote", sc.PreVote),
zap.Bool("initial-corrupt-check", sc.InitialCorruptCheck),
zap.String("corrupt-check-time-interval", sc.CorruptCheckTime.String()),
@ -365,8 +355,8 @@ func (e *Etcd) Close() {
fields := []zap.Field{
zap.String("name", e.cfg.Name),
zap.String("data-dir", e.cfg.Dir),
zap.Strings("advertise-peer-urls", e.cfg.getAdvertisePeerUrls()),
zap.Strings("advertise-client-urls", e.cfg.getAdvertiseClientUrls()),
zap.Strings("advertise-peer-urls", e.cfg.getAPURLs()),
zap.Strings("advertise-client-urls", e.cfg.getACURLs()),
}
lg := e.GetLogger()
if lg != nil {
@ -434,7 +424,7 @@ func stopServers(ctx context.Context, ss *servers) {
// do not grpc.Server.GracefulStop with TLS enabled etcd server
// See https://github.com/grpc/grpc-go/issues/1384#issuecomment-317124531
// and https://github.com/etcd-io/etcd/issues/8916
if ss.secure && ss.http != nil {
if ss.secure {
shutdownNow()
return
}
@ -473,9 +463,6 @@ func configurePeerListeners(cfg *Config) (peers []*peerListener, err error) {
plog.Fatalf("could not get certs (%v)", err)
}
}
updateMinMaxVersions(&cfg.PeerTLSInfo, cfg.TlsMinVersion, cfg.TlsMaxVersion)
if !cfg.PeerTLSInfo.Empty() {
if cfg.logger != nil {
cfg.logger.Info(
@ -488,7 +475,7 @@ func configurePeerListeners(cfg *Config) (peers []*peerListener, err error) {
}
}
peers = make([]*peerListener, len(cfg.ListenPeerUrls))
peers = make([]*peerListener, len(cfg.LPUrls))
defer func() {
if err == nil {
return
@ -498,11 +485,11 @@ func configurePeerListeners(cfg *Config) (peers []*peerListener, err error) {
if cfg.logger != nil {
cfg.logger.Warn(
"closing peer listener",
zap.String("address", cfg.ListenPeerUrls[i].String()),
zap.String("address", cfg.LPUrls[i].String()),
zap.Error(err),
)
} else {
plog.Info("stopping listening for peers on ", cfg.ListenPeerUrls[i].String())
plog.Info("stopping listening for peers on ", cfg.LPUrls[i].String())
}
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
peers[i].close(ctx)
@ -511,7 +498,7 @@ func configurePeerListeners(cfg *Config) (peers []*peerListener, err error) {
}
}()
for i, u := range cfg.ListenPeerUrls {
for i, u := range cfg.LPUrls {
if u.Scheme == "http" {
if !cfg.PeerTLSInfo.Empty() {
if cfg.logger != nil {
@ -613,9 +600,6 @@ func configureClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err erro
plog.Fatalf("could not get certs (%v)", err)
}
}
updateMinMaxVersions(&cfg.ClientTLSInfo, cfg.TlsMinVersion, cfg.TlsMaxVersion)
if cfg.EnablePprof {
if cfg.logger != nil {
cfg.logger.Info("pprof is enabled", zap.String("path", debugutil.HTTPPrefixPProf))
@ -625,7 +609,8 @@ func configureClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err erro
}
sctxs = make(map[string]*serveCtx)
for _, u := range append(cfg.ListenClientUrls, cfg.ListenClientHttpUrls...) {
for _, u := range cfg.LCUrls {
sctx := newServeCtx(cfg.logger)
if u.Scheme == "http" || u.Scheme == "unix" {
if !cfg.ClientTLSInfo.Empty() {
if cfg.logger != nil {
@ -645,45 +630,29 @@ func configureClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err erro
if (u.Scheme == "https" || u.Scheme == "unixs") && cfg.ClientTLSInfo.Empty() {
return nil, fmt.Errorf("TLS key/cert (--cert-file, --key-file) must be provided for client url %s with HTTPS scheme", u.String())
}
}
for _, u := range cfg.ListenClientUrls {
addr, secure, network := resolveUrl(u)
sctx := sctxs[addr]
if sctx == nil {
sctx = newServeCtx(cfg.logger)
sctxs[addr] = sctx
network := "tcp"
addr := u.Host
if u.Scheme == "unix" || u.Scheme == "unixs" {
network = "unix"
addr = u.Host + u.Path
}
sctx.secure = sctx.secure || secure
sctx.insecure = sctx.insecure || !secure
sctx.scheme = u.Scheme
sctx.addr = addr
sctx.network = network
}
for _, u := range cfg.ListenClientHttpUrls {
addr, secure, network := resolveUrl(u)
sctx := sctxs[addr]
if sctx == nil {
sctx = newServeCtx(cfg.logger)
sctxs[addr] = sctx
} else if !sctx.httpOnly {
return nil, fmt.Errorf("cannot bind both --client-listen-urls and --client-listen-http-urls on the same url %s", u.String())
}
sctx.secure = sctx.secure || secure
sctx.insecure = sctx.insecure || !secure
sctx.scheme = u.Scheme
sctx.addr = addr
sctx.network = network
sctx.httpOnly = true
sctx.secure = u.Scheme == "https" || u.Scheme == "unixs"
sctx.insecure = !sctx.secure
if oldctx := sctxs[addr]; oldctx != nil {
oldctx.secure = oldctx.secure || sctx.secure
oldctx.insecure = oldctx.insecure || sctx.insecure
continue
}
for _, sctx := range sctxs {
if sctx.l, err = net.Listen(sctx.network, sctx.addr); err != nil {
if sctx.l, err = net.Listen(network, addr); err != nil {
return nil, err
}
// net.Listener will rewrite ipv4 0.0.0.0 to ipv6 [::], breaking
// hosts that disable ipv6. So, use the address given by the user.
sctx.addr = addr
if fdLimit, fderr := runtimeutil.FDLimit(); fderr == nil {
if fdLimit <= reservedInternalFDNum {
@ -700,27 +669,27 @@ func configureClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err erro
sctx.l = transport.LimitListener(sctx.l, int(fdLimit-reservedInternalFDNum))
}
if sctx.network == "tcp" {
if sctx.l, err = transport.NewKeepAliveListener(sctx.l, sctx.network, nil); err != nil {
if network == "tcp" {
if sctx.l, err = transport.NewKeepAliveListener(sctx.l, network, nil); err != nil {
return nil, err
}
}
defer func(sctx *serveCtx) {
if err == nil || sctx.l == nil {
defer func() {
if err == nil {
return
}
sctx.l.Close()
if cfg.logger != nil {
cfg.logger.Warn(
"closing peer listener",
zap.String("address", sctx.addr),
zap.String("address", u.Host),
zap.Error(err),
)
} else {
plog.Info("stopping listening for client requests on ", sctx.addr)
plog.Info("stopping listening for client requests on ", u.Host)
}
}(sctx)
}()
for k := range cfg.UserHandlers {
sctx.userHandlers[k] = cfg.UserHandlers[k]
}
@ -731,21 +700,11 @@ func configureClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err erro
if cfg.Debug {
sctx.registerTrace()
}
sctxs[addr] = sctx
}
return sctxs, nil
}
func resolveUrl(u url.URL) (addr string, secure bool, network string) {
addr = u.Host
network = "tcp"
if u.Scheme == "unix" || u.Scheme == "unixs" {
addr = u.Host + u.Path
network = "unix"
}
secure = u.Scheme == "https" || u.Scheme == "unixs"
return addr, secure, network
}
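A hedged sketch of the mapping the resolveUrl helper shown above performs; inputs are illustrative (assumes "fmt" and "net/url").

// resolveUrl for hypothetical inputs:
//   https://10.0.0.1:2379 -> addr "10.0.0.1:2379", secure true,  network "tcp"
//   http://10.0.0.1:2379  -> addr "10.0.0.1:2379", secure false, network "tcp"
//   unixs://etcd.sock     -> addr "etcd.sock",     secure true,  network "unix"
u, _ := url.Parse("unixs://etcd.sock")
addr, secure, network := resolveUrl(*u)
fmt.Println(addr, secure, network) // etcd.sock true unix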
func (e *Etcd) serveClients() (err error) {
if !e.cfg.ClientTLSInfo.Empty() {
if e.cfg.logger != nil {
@ -789,69 +748,15 @@ func (e *Etcd) serveClients() (err error) {
}))
}
splitHttp := false
for _, sctx := range e.sctxs {
if sctx.httpOnly {
splitHttp = true
}
}
// start client servers in each goroutine
for _, sctx := range e.sctxs {
go func(s *serveCtx) {
e.errHandler(s.serve(e.Server, &e.cfg.ClientTLSInfo, h, e.errHandler, e.grpcGatewayDial(splitHttp), splitHttp, gopts...))
e.errHandler(s.serve(e.Server, &e.cfg.ClientTLSInfo, h, e.errHandler, gopts...))
}(sctx)
}
return nil
}
func (e *Etcd) grpcGatewayDial(splitHttp bool) (grpcDial func(ctx context.Context) (*grpc.ClientConn, error)) {
if !e.cfg.EnableGRPCGateway {
return nil
}
sctx := e.pickGrpcGatewayServeContext(splitHttp)
addr := sctx.addr
if network := sctx.network; network == "unix" {
// explicitly define unix network for gRPC socket support
addr = fmt.Sprintf("%s://%s", network, addr)
}
opts := []grpc.DialOption{grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(math.MaxInt32))}
if sctx.secure {
tlscfg, tlsErr := e.cfg.ClientTLSInfo.ServerConfig()
if tlsErr != nil {
return func(ctx context.Context) (*grpc.ClientConn, error) {
return nil, tlsErr
}
}
dtls := tlscfg.Clone()
// trust local server
dtls.InsecureSkipVerify = true
bundle := credentials.NewBundle(credentials.Config{TLSConfig: dtls})
opts = append(opts, grpc.WithTransportCredentials(bundle.TransportCredentials()))
} else {
opts = append(opts, grpc.WithInsecure())
}
return func(ctx context.Context) (*grpc.ClientConn, error) {
conn, err := grpc.DialContext(ctx, addr, opts...)
if err != nil {
sctx.lg.Error("grpc gateway failed to dial", zap.String("addr", addr), zap.Error(err))
return nil, err
}
return conn, err
}
}
func (e *Etcd) pickGrpcGatewayServeContext(splitHttp bool) *serveCtx {
for _, sctx := range e.sctxs {
if !splitHttp || !sctx.httpOnly {
return sctx
}
}
panic("Expect at least one context able to serve grpc")
}
func (e *Etcd) serveMetrics() (err error) {
if e.cfg.Metrics == "extensive" {
grpc_prometheus.EnableHandlingTimeHistogram()

View File

@ -23,6 +23,7 @@ import (
"net/http"
"strings"
"go.etcd.io/etcd/clientv3/credentials"
"go.etcd.io/etcd/etcdserver"
"go.etcd.io/etcd/etcdserver/api/v3client"
"go.etcd.io/etcd/etcdserver/api/v3election"
@ -41,7 +42,6 @@ import (
"github.com/soheilhy/cmux"
"github.com/tmc/grpc-websocket-proxy/wsproxy"
"go.uber.org/zap"
"golang.org/x/net/http2"
"golang.org/x/net/trace"
"google.golang.org/grpc"
)
@ -49,13 +49,10 @@ import (
type serveCtx struct {
lg *zap.Logger
l net.Listener
scheme string
addr string
network string
secure bool
insecure bool
httpOnly bool
ctx context.Context
cancel context.CancelFunc
@ -90,8 +87,6 @@ func (sctx *serveCtx) serve(
tlsinfo *transport.TLSInfo,
handler http.Handler,
errHandler func(error),
grpcDialForRestGatewayBackends func(ctx context.Context) (*grpc.ClientConn, error),
splitHttp bool,
gopts ...grpc.ServerOption) (err error) {
logger := defaultLog.New(ioutil.Discard, "etcdhttp", 0)
<-s.ReadyNotify()
@ -101,103 +96,48 @@ func (sctx *serveCtx) serve(
}
m := cmux.New(sctx.l)
var server func() error
onlyGRPC := splitHttp && !sctx.httpOnly
onlyHttp := splitHttp && sctx.httpOnly
grpcEnabled := !onlyHttp
httpEnabled := !onlyGRPC
v3c := v3client.New(s)
servElection := v3election.NewElectionServer(v3c)
servLock := v3lock.NewLockServer(v3c)
// Make sure serversC is closed even if we prematurely exit the function.
defer close(sctx.serversC)
var gwmux *gw.ServeMux
if s.Cfg.EnableGRPCGateway {
// The gRPC gateway connects to the gRPC server via a connection provided by grpc dial.
gwmux, err = sctx.registerGateway(grpcDialForRestGatewayBackends)
if err != nil {
sctx.lg.Error("registerGateway failed", zap.Error(err))
return err
}
}
var traffic string
switch {
case onlyGRPC:
traffic = "grpc"
case onlyHttp:
traffic = "http"
default:
traffic = "grpc+http"
var gs *grpc.Server
defer func() {
if err != nil && gs != nil {
gs.Stop()
}
}()
if sctx.insecure {
var gs *grpc.Server
var srv *http.Server
if httpEnabled {
httpmux := sctx.createMux(gwmux, handler)
srv = &http.Server{
Handler: createAccessController(sctx.lg, s, httpmux),
ErrorLog: logger, // do not log user error
}
if err := configureHttpServer(srv, s.Cfg); err != nil {
sctx.lg.Error("Configure http server failed", zap.Error(err))
return err
}
}
if grpcEnabled {
gs = v3rpc.Server(s, nil, gopts...)
v3electionpb.RegisterElectionServer(gs, servElection)
v3lockpb.RegisterLockServer(gs, servLock)
if sctx.serviceRegister != nil {
sctx.serviceRegister(gs)
}
defer func(gs *grpc.Server) {
if err == nil {
return
}
if sctx.lg != nil {
sctx.lg.Warn("stopping insecure grpc server due to error", zap.Error(err))
} else {
plog.Warningf("stopping insecure grpc server due to error: %s", err)
}
gs.Stop()
if sctx.lg != nil {
sctx.lg.Warn("stopped insecure grpc server due to error", zap.Error(err))
} else {
plog.Warningf("stopped insecure grpc server due to error: %s", err)
}
}(gs)
}
if onlyGRPC {
server = func() error {
return gs.Serve(sctx.l)
}
} else {
server = m.Serve
httpl := m.Match(cmux.HTTP1())
go func(srvhttp *http.Server, tlsLis net.Listener) {
errHandler(srvhttp.Serve(tlsLis))
}(srv, httpl)
if grpcEnabled {
grpcl := m.Match(cmux.HTTP2())
go func(gs *grpc.Server, l net.Listener) {
errHandler(gs.Serve(l))
}(gs, grpcl)
go func() { errHandler(gs.Serve(grpcl)) }()
var gwmux *gw.ServeMux
if s.Cfg.EnableGRPCGateway {
gwmux, err = sctx.registerGateway([]grpc.DialOption{grpc.WithInsecure()})
if err != nil {
return err
}
}
sctx.serversC <- &servers{grpc: gs, http: srv}
httpmux := sctx.createMux(gwmux, handler)
srvhttp := &http.Server{
Handler: createAccessController(sctx.lg, s, httpmux),
ErrorLog: logger, // do not log user error
}
httpl := m.Match(cmux.HTTP1())
go func() { errHandler(srvhttp.Serve(httpl)) }()
sctx.serversC <- &servers{grpc: gs, http: srvhttp}
if sctx.lg != nil {
sctx.lg.Info(
"serving client traffic insecurely; this is strongly discouraged!",
zap.String("traffic", traffic),
zap.String("address", sctx.l.Addr().String()),
)
} else {
@ -206,77 +146,50 @@ func (sctx *serveCtx) serve(
}
if sctx.secure {
var gs *grpc.Server
var srv *http.Server
tlscfg, tlsErr := tlsinfo.ServerConfig()
if tlsErr != nil {
return tlsErr
}
if grpcEnabled {
gs = v3rpc.Server(s, tlscfg, gopts...)
v3electionpb.RegisterElectionServer(gs, servElection)
v3lockpb.RegisterLockServer(gs, servLock)
if sctx.serviceRegister != nil {
sctx.serviceRegister(gs)
}
defer func(gs *grpc.Server) {
if err == nil {
return
}
if sctx.lg != nil {
sctx.lg.Warn("stopping secure grpc server due to error", zap.Error(err))
} else {
plog.Warningf("stopping secure grpc server due to error: %s", err)
}
gs.Stop()
if sctx.lg != nil {
sctx.lg.Warn("stopped secure grpc server due to error", zap.Error(err))
} else {
plog.Warningf("stopped secure grpc server due to error: %s", err)
}
}(gs)
}
if httpEnabled {
if grpcEnabled {
handler = grpcHandlerFunc(gs, handler)
var gwmux *gw.ServeMux
if s.Cfg.EnableGRPCGateway {
dtls := tlscfg.Clone()
// trust local server
dtls.InsecureSkipVerify = true
bundle := credentials.NewBundle(credentials.Config{TLSConfig: dtls})
opts := []grpc.DialOption{grpc.WithTransportCredentials(bundle.TransportCredentials())}
gwmux, err = sctx.registerGateway(opts)
if err != nil {
return err
}
}
var tlsl net.Listener
tlsl, err = transport.NewTLSListener(m.Match(cmux.Any()), tlsinfo)
if err != nil {
return err
}
// TODO: add debug flag; enable logging when debug flag is set
httpmux := sctx.createMux(gwmux, handler)
srv = &http.Server{
srv := &http.Server{
Handler: createAccessController(sctx.lg, s, httpmux),
TLSConfig: tlscfg,
ErrorLog: logger, // do not log user error
}
if err := configureHttpServer(srv, s.Cfg); err != nil {
sctx.lg.Error("Configure https server failed", zap.Error(err))
return err
}
}
if onlyGRPC {
server = func() error { return gs.Serve(sctx.l) }
} else {
server = m.Serve
tlsl, err := transport.NewTLSListener(m.Match(cmux.Any()), tlsinfo)
if err != nil {
return err
}
go func(srvhttp *http.Server, tlsl net.Listener) {
errHandler(srvhttp.Serve(tlsl))
}(srv, tlsl)
}
go func() { errHandler(srv.Serve(tlsl)) }()
sctx.serversC <- &servers{secure: true, grpc: gs, http: srv}
if sctx.lg != nil {
sctx.lg.Info(
"serving client traffic securely",
zap.String("traffic", traffic),
zap.String("address", sctx.l.Addr().String()),
)
} else {
@ -284,16 +197,8 @@ func (sctx *serveCtx) serve(
}
}
return server()
}
func configureHttpServer(srv *http.Server, cfg etcdserver.ServerConfig) error {
// todo (ahrtr): should we support configuring other parameters in the future as well?
return http2.ConfigureServer(srv, &http2.Server{
MaxConcurrentStreams: cfg.MaxConcurrentStreams,
// Override to avoid using priority scheduler which is affected by https://github.com/golang/go/issues/58804.
NewWriteScheduler: http2.NewRandomWriteScheduler,
})
close(sctx.serversC)
return m.Serve()
}
// grpcHandlerFunc returns an http.Handler that delegates to grpcServer on incoming gRPC
@ -315,10 +220,16 @@ func grpcHandlerFunc(grpcServer *grpc.Server, otherHandler http.Handler) http.Ha
type registerHandlerFunc func(context.Context, *gw.ServeMux, *grpc.ClientConn) error
func (sctx *serveCtx) registerGateway(dial func(ctx context.Context) (*grpc.ClientConn, error)) (*gw.ServeMux, error) {
func (sctx *serveCtx) registerGateway(opts []grpc.DialOption) (*gw.ServeMux, error) {
ctx := sctx.ctx
conn, err := dial(ctx)
addr := sctx.addr
if network := sctx.network; network == "unix" {
// explicitly define unix network for gRPC socket support
addr = fmt.Sprintf("%s://%s", network, addr)
}
conn, err := grpc.DialContext(ctx, addr, opts...)
if err != nil {
return nil, err
}
@ -357,18 +268,6 @@ func (sctx *serveCtx) registerGateway(dial func(ctx context.Context) (*grpc.Clie
return gwmux, nil
}
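The unix-network branch in registerGateway above works because grpc-go accepts a unix scheme in the dial target. A hedged sketch with a hypothetical socket path:

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	// "unix://<addr>" mirrors the fmt.Sprintf("%s://%s", network, addr) above;
	// /tmp/etcd.sock is made up for this sketch.
	conn, err := grpc.DialContext(ctx, "unix:///tmp/etcd.sock",
		grpc.WithInsecure(), grpc.WithBlock())
	if err != nil {
		log.Println("dial failed (expected if nothing listens there):", err)
		return
	}
	defer conn.Close()
}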
type wsProxyZapLogger struct {
*zap.Logger
}
func (w wsProxyZapLogger) Warnln(i ...interface{}) {
w.Warn(fmt.Sprint(i...))
}
func (w wsProxyZapLogger) Debugln(i ...interface{}) {
w.Debug(fmt.Sprint(i...))
}
func (sctx *serveCtx) createMux(gwmux *gw.ServeMux, handler http.Handler) *http.ServeMux {
httpmux := http.NewServeMux()
for path, h := range sctx.userHandlers {
@ -388,7 +287,6 @@ func (sctx *serveCtx) createMux(gwmux *gw.ServeMux, handler http.Handler) *http.
},
),
wsproxy.WithMaxRespBodyBufferSize(0x7fffffff),
wsproxy.WithLogger(wsProxyZapLogger{sctx.lg}),
),
)
}
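For context on the hunk above: createMux wraps the gateway mux with grpc-websocket-proxy, and WithMaxRespBodyBufferSize lifts the proxy's default 64 KB per-message buffer. A minimal sketch of that wrapping, with the mux and address assumed:

package main

import (
	"log"
	"net/http"

	"github.com/tmc/grpc-websocket-proxy/wsproxy"
)

func main() {
	gwmux := http.NewServeMux() // stand-in for the grpc-gateway *gw.ServeMux
	handler := wsproxy.WebsocketProxy(
		gwmux,
		// gRPC-gateway streaming endpoints expect POST.
		wsproxy.WithRequestMutator(
			func(incoming *http.Request, outgoing *http.Request) *http.Request {
				outgoing.Method = "POST"
				return outgoing
			},
		),
		// Allow notification messages larger than the default 64 KB buffer.
		wsproxy.WithMaxRespBodyBufferSize(0x7fffffff),
	)
	log.Fatal(http.ListenAndServe("127.0.0.1:8081", handler)) // address assumed
}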

View File

@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build cov
// +build cov
package ctlv2

View File

@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !cov
// +build !cov
package ctlv2

View File

@ -311,8 +311,6 @@ func newCheckDatascaleCommand(cmd *cobra.Command, args []string) {
ExitWithError(ExitError, errEndpoints)
}
sec := secureCfgFromCmd(cmd)
ctx, cancel := context.WithCancel(context.Background())
resp, err := clients[0].Get(ctx, checkDatascalePrefix, v3.WithPrefix(), v3.WithLimit(1))
cancel()
@ -331,7 +329,7 @@ func newCheckDatascaleCommand(cmd *cobra.Command, args []string) {
wg.Add(len(clients))
// get the process_resident_memory_bytes and process_virtual_memory_bytes before the put operations
bytesBefore := endpointMemoryMetrics(eps[0], sec)
bytesBefore := endpointMemoryMetrics(eps[0])
if bytesBefore == 0 {
fmt.Println("FAIL: Could not read process_resident_memory_bytes before the put operations.")
os.Exit(ExitError)
@ -369,7 +367,7 @@ func newCheckDatascaleCommand(cmd *cobra.Command, args []string) {
s := <-sc
// get the process_resident_memory_bytes after the put operations
bytesAfter := endpointMemoryMetrics(eps[0], sec)
bytesAfter := endpointMemoryMetrics(eps[0])
if bytesAfter == 0 {
fmt.Println("FAIL: Could not read process_resident_memory_bytes after the put operations.")
os.Exit(ExitError)

View File

@ -31,7 +31,6 @@ var (
getFromKey bool
getRev int64
getKeysOnly bool
getCountOnly bool
printValueOnly bool
)
@ -51,7 +50,6 @@ func NewGetCommand() *cobra.Command {
cmd.Flags().BoolVar(&getFromKey, "from-key", false, "Get keys that are greater than or equal to the given key using byte compare")
cmd.Flags().Int64Var(&getRev, "rev", 0, "Specify the kv revision")
cmd.Flags().BoolVar(&getKeysOnly, "keys-only", false, "Get only the keys")
cmd.Flags().BoolVar(&getCountOnly, "count-only", false, "Get only the count")
cmd.Flags().BoolVar(&printValueOnly, "print-value-only", false, `Only write values when using the "simple" output format`)
return cmd
}
@ -66,12 +64,6 @@ func getCommandFunc(cmd *cobra.Command, args []string) {
ExitWithError(ExitError, err)
}
if getCountOnly {
if _, fields := display.(*fieldsPrinter); !fields {
ExitWithError(ExitBadArgs, fmt.Errorf("--count-only is only for `--write-out=fields`"))
}
}
if printValueOnly {
dp, simple := (display).(*simplePrinter)
if !simple {
@ -91,10 +83,6 @@ func getGetOp(args []string) (string, []clientv3.OpOption) {
ExitWithError(ExitBadArgs, fmt.Errorf("`--prefix` and `--from-key` cannot be set at the same time, choose one"))
}
if getKeysOnly && getCountOnly {
ExitWithError(ExitBadArgs, fmt.Errorf("`--keys-only` and `--count-only` cannot be set at the same time, choose one"))
}
opts := []clientv3.OpOption{}
switch getConsistency {
case "s":
@ -171,9 +159,5 @@ func getGetOp(args []string) (string, []clientv3.OpOption) {
opts = append(opts, clientv3.WithKeysOnly())
}
if getCountOnly {
opts = append(opts, clientv3.WithCountOnly())
}
return key, opts
}
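The removed --count-only flag corresponds to clientv3's WithCountOnly option. A short sketch of the equivalent client call; the endpoint and key prefix are assumptions:

package main

import (
	"context"
	"fmt"
	"time"

	"go.etcd.io/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"}, // assumed endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	// WithCountOnly returns only resp.Count; no keys or values cross the wire.
	resp, err := cli.Get(ctx, "foo", clientv3.WithPrefix(), clientv3.WithCountOnly())
	if err != nil {
		panic(err)
	}
	fmt.Println("keys under foo:", resp.Count)
}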

View File

@ -42,8 +42,7 @@ func transferLeadershipCommandFunc(cmd *cobra.Command, args []string) {
ExitWithError(ExitBadArgs, err)
}
cfg := clientConfigFromCmd(cmd)
c := cfg.mustClient()
c := mustClientFromCmd(cmd)
eps := c.Endpoints()
c.Close()
@ -53,6 +52,7 @@ func transferLeadershipCommandFunc(cmd *cobra.Command, args []string) {
var leaderCli *clientv3.Client
var leaderID uint64
for _, ep := range eps {
cfg := clientConfigFromCmd(cmd)
cfg.endpoints = []string{ep}
cli := cfg.mustClient()
resp, serr := cli.Status(ctx, ep)

View File

@ -16,7 +16,6 @@ package command
import (
"context"
"crypto/tls"
"encoding/hex"
"fmt"
"io/ioutil"
@ -91,26 +90,14 @@ func isCommandTimeoutFlagSet(cmd *cobra.Command) bool {
return commandTimeoutFlag.Changed
}
// get the process_resident_memory_bytes from <server>/metrics
func endpointMemoryMetrics(host string, scfg *secureCfg) float64 {
// get the process_resident_memory_bytes from <server:2379>/metrics
func endpointMemoryMetrics(host string) float64 {
residentMemoryKey := "process_resident_memory_bytes"
var residentMemoryValue string
if !strings.HasPrefix(host, "http://") && !strings.HasPrefix(host, "https://") {
if !strings.HasPrefix(host, `http://`) {
host = "http://" + host
}
url := host + "/metrics"
if strings.HasPrefix(host, "https://") {
// load client certificate
cert, err := tls.LoadX509KeyPair(scfg.cert, scfg.key)
if err != nil {
fmt.Println(fmt.Sprintf("client certificate error: %v", err))
return 0.0
}
http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{
Certificates: []tls.Certificate{cert},
InsecureSkipVerify: scfg.insecureSkipVerify,
}
}
resp, err := http.Get(url)
if err != nil {
fmt.Println(fmt.Sprintf("fetch error: %v", err))
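endpointMemoryMetrics amounts to scraping one Prometheus gauge over HTTP. A self-contained sketch of the idea; etcd's actual parsing may differ:

package main

import (
	"bufio"
	"fmt"
	"net/http"
	"strconv"
	"strings"
)

// residentMemoryBytes fetches <host>/metrics and extracts a single gauge.
func residentMemoryBytes(host string) float64 {
	if !strings.HasPrefix(host, "http://") && !strings.HasPrefix(host, "https://") {
		host = "http://" + host
	}
	resp, err := http.Get(host + "/metrics")
	if err != nil {
		return 0
	}
	defer resp.Body.Close()
	sc := bufio.NewScanner(resp.Body)
	for sc.Scan() {
		fields := strings.Fields(sc.Text())
		if len(fields) == 2 && fields[0] == "process_resident_memory_bytes" {
			v, _ := strconv.ParseFloat(fields[1], 64)
			return v
		}
	}
	return 0
}

func main() {
	fmt.Println(residentMemoryBytes("127.0.0.1:2379")) // endpoint assumed
}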

View File

@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build cov
// +build cov
package ctlv3

View File

@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !cov
// +build !cov
package ctlv3

View File

@ -29,7 +29,6 @@ import (
"go.etcd.io/etcd/embed"
"go.etcd.io/etcd/pkg/flags"
"go.etcd.io/etcd/pkg/logutil"
"go.etcd.io/etcd/pkg/tlsutil"
"go.etcd.io/etcd/pkg/types"
"go.etcd.io/etcd/version"
@ -141,11 +140,7 @@ func newConfig() *config {
)
fs.Var(
flags.NewUniqueURLsWithExceptions(embed.DefaultListenClientURLs, ""), "listen-client-urls",
"List of URLs to listen on for client grpc traffic and http as long as --listen-client-http-urls is not specified.",
)
fs.Var(
flags.NewUniqueURLsWithExceptions("", ""), "listen-client-http-urls",
"List of URLs to listen on for http only client traffic. Enabling this flag removes http services from --listen-client-urls.",
"List of URLs to listen on for client traffic.",
)
fs.Var(
flags.NewUniqueURLsWithExceptions("", ""),
@ -168,8 +163,6 @@ func newConfig() *config {
fs.DurationVar(&cfg.ec.GRPCKeepAliveInterval, "grpc-keepalive-interval", cfg.ec.GRPCKeepAliveInterval, "Frequency duration of server-to-client ping to check if a connection is alive (0 to disable).")
fs.DurationVar(&cfg.ec.GRPCKeepAliveTimeout, "grpc-keepalive-timeout", cfg.ec.GRPCKeepAliveTimeout, "Additional duration of wait before closing a non-responsive connection (0 to disable).")
fs.Var(flags.NewUint32Value(cfg.ec.MaxConcurrentStreams), "max-concurrent-streams", "Maximum concurrent streams that each client can open at a time.")
// clustering
fs.Var(
flags.NewUniqueURLsWithExceptions(embed.DefaultInitialAdvertisePeerURLs, ""),
@ -189,7 +182,7 @@ func newConfig() *config {
fs.StringVar(&cfg.ec.DNSClusterServiceName, "discovery-srv-name", cfg.ec.DNSClusterServiceName, "Service name to query when using DNS discovery.")
fs.StringVar(&cfg.ec.InitialCluster, "initial-cluster", cfg.ec.InitialCluster, "Initial cluster configuration for bootstrapping.")
fs.StringVar(&cfg.ec.InitialClusterToken, "initial-cluster-token", cfg.ec.InitialClusterToken, "Initial cluster token for the etcd cluster during bootstrap.")
fs.Var(cfg.cf.clusterState, "initial-cluster-state", "Initial cluster state ('new' when bootstrapping a new cluster or 'existing' when adding new members to an existing cluster). After successful initialization (bootstrapping or adding), flag is ignored on restarts.")
fs.Var(cfg.cf.clusterState, "initial-cluster-state", "Initial cluster state ('new' or 'existing').")
fs.BoolVar(&cfg.ec.StrictReconfigCheck, "strict-reconfig-check", cfg.ec.StrictReconfigCheck, "Reject reconfiguration requests that would cause quorum loss.")
fs.BoolVar(&cfg.ec.EnableV2, "enable-v2", cfg.ec.EnableV2, "Accept etcd V2 client requests.")
@ -221,8 +214,6 @@ func newConfig() *config {
fs.StringVar(&cfg.ec.PeerTLSInfo.AllowedHostname, "peer-cert-allowed-hostname", "", "Allowed TLS hostname for inter peer authentication.")
fs.Var(flags.NewStringsValue(""), "cipher-suites", "Comma-separated list of supported TLS cipher suites between client/server and peers (empty will be auto-populated by Go).")
fs.BoolVar(&cfg.ec.PeerTLSInfo.SkipClientSANVerify, "experimental-peer-skip-client-san-verification", false, "Skip verification of SAN field in client certificate for peer connections.")
fs.StringVar(&cfg.ec.TlsMinVersion, "tls-min-version", string(tlsutil.TLSVersion12), "Minimum TLS version supported by etcd. Possible values: TLS1.2, TLS1.3.")
fs.StringVar(&cfg.ec.TlsMaxVersion, "tls-max-version", string(tlsutil.TLSVersionDefault), "Maximum TLS version supported by etcd. Possible values: TLS1.2, TLS1.3 (empty defers to Go).")
fs.Var(
flags.NewUniqueURLsWithExceptions("*", "*"),
@ -264,12 +255,9 @@ func newConfig() *config {
fs.DurationVar(&cfg.ec.ExperimentalCorruptCheckTime, "experimental-corrupt-check-time", cfg.ec.ExperimentalCorruptCheckTime, "Duration of time between cluster corruption check passes.")
fs.StringVar(&cfg.ec.ExperimentalEnableV2V3, "experimental-enable-v2v3", cfg.ec.ExperimentalEnableV2V3, "v3 prefix for serving emulated v2 state.")
fs.StringVar(&cfg.ec.ExperimentalBackendFreelistType, "experimental-backend-bbolt-freelist-type", cfg.ec.ExperimentalBackendFreelistType, "ExperimentalBackendFreelistType specifies the type of freelist that boltdb backend uses (array and map are supported types).")
fs.BoolVar(&cfg.ec.ExperimentalEnableLeaseCheckpoint, "experimental-enable-lease-checkpoint", false, "Enable leader to send regular checkpoints to other members to prevent reset of remaining TTL on leader change.")
// TODO: delete in v3.7
fs.BoolVar(&cfg.ec.ExperimentalEnableLeaseCheckpointPersist, "experimental-enable-lease-checkpoint-persist", false, "Enable persisting remainingTTL to prevent indefinite auto-renewal of long lived leases. Always enabled in v3.6. Should be used to ensure smooth upgrade from v3.5 clusters with this feature enabled. Requires experimental-enable-lease-checkpoint to be enabled.")
fs.BoolVar(&cfg.ec.ExperimentalEnableLeaseCheckpoint, "experimental-enable-lease-checkpoint", false, "Enable persisting lease remaining TTL to prevent indefinite auto-renewal of long lived leases.")
fs.IntVar(&cfg.ec.ExperimentalCompactionBatchLimit, "experimental-compaction-batch-limit", cfg.ec.ExperimentalCompactionBatchLimit, "Sets the maximum revisions deleted in each compaction batch.")
fs.DurationVar(&cfg.ec.ExperimentalWatchProgressNotifyInterval, "experimental-watch-progress-notify-interval", cfg.ec.ExperimentalWatchProgressNotifyInterval, "Duration of periodic watch progress notifications.")
fs.DurationVar(&cfg.ec.ExperimentalWarningApplyDuration, "experimental-warning-apply-duration", cfg.ec.ExperimentalWarningApplyDuration, "Time duration after which a warning is generated if request takes more time.")
// unsafe
fs.BoolVar(&cfg.ec.UnsafeNoFsync, "unsafe-no-fsync", false, "Disables fsync, unsafe, will cause data loss.")
@ -336,11 +324,10 @@ func (cfg *config) configFromCmdLine() error {
return err
}
cfg.ec.ListenPeerUrls = flags.UniqueURLsFromFlag(cfg.cf.flagSet, "listen-peer-urls")
cfg.ec.AdvertisePeerUrls = flags.UniqueURLsFromFlag(cfg.cf.flagSet, "initial-advertise-peer-urls")
cfg.ec.ListenClientUrls = flags.UniqueURLsFromFlag(cfg.cf.flagSet, "listen-client-urls")
cfg.ec.ListenClientHttpUrls = flags.UniqueURLsFromFlag(cfg.cf.flagSet, "listen-client-http-urls")
cfg.ec.AdvertiseClientUrls = flags.UniqueURLsFromFlag(cfg.cf.flagSet, "advertise-client-urls")
cfg.ec.LPUrls = flags.UniqueURLsFromFlag(cfg.cf.flagSet, "listen-peer-urls")
cfg.ec.APUrls = flags.UniqueURLsFromFlag(cfg.cf.flagSet, "initial-advertise-peer-urls")
cfg.ec.LCUrls = flags.UniqueURLsFromFlag(cfg.cf.flagSet, "listen-client-urls")
cfg.ec.ACUrls = flags.UniqueURLsFromFlag(cfg.cf.flagSet, "advertise-client-urls")
cfg.ec.ListenMetricsUrls = flags.UniqueURLsFromFlag(cfg.cf.flagSet, "listen-metrics-urls")
cfg.ec.CORS = flags.UniqueURLsMapFromFlag(cfg.cf.flagSet, "cors")
@ -348,8 +335,6 @@ func (cfg *config) configFromCmdLine() error {
cfg.ec.CipherSuites = flags.StringsFromFlag(cfg.cf.flagSet, "cipher-suites")
cfg.ec.MaxConcurrentStreams = flags.Uint32FromFlag(cfg.cf.flagSet, "max-concurrent-streams")
// TODO: remove this in v3.5
cfg.ec.DeprecatedLogOutput = flags.UniqueStringsFromFlag(cfg.cf.flagSet, "log-output")
cfg.ec.LogOutputs = flags.UniqueStringsFromFlag(cfg.cf.flagSet, "log-outputs")
@ -361,7 +346,7 @@ func (cfg *config) configFromCmdLine() error {
// disable default advertise-client-urls if lcurls is set
missingAC := flags.IsSet(cfg.cf.flagSet, "listen-client-urls") && !flags.IsSet(cfg.cf.flagSet, "advertise-client-urls")
if !cfg.mayBeProxy() && missingAC {
cfg.ec.AdvertiseClientUrls = nil
cfg.ec.ACUrls = nil
}
// disable default initial-cluster if discovery is set

View File

@ -36,7 +36,6 @@ func TestConfigParsingMemberFlags(t *testing.T) {
"-snapshot-count=10",
"-listen-peer-urls=http://localhost:8000,https://localhost:8001",
"-listen-client-urls=http://localhost:7000,https://localhost:7001",
"-listen-client-http-urls=http://localhost:7002,https://localhost:7003",
// it should be set if -listen-client-urls is set
"-advertise-client-urls=http://localhost:7000,https://localhost:7001",
}
@ -57,10 +56,9 @@ func TestConfigFileMemberFields(t *testing.T) {
MaxWalFiles uint `json:"max-wals"`
Name string `json:"name"`
SnapshotCount uint64 `json:"snapshot-count"`
ListenPeerUrls string `json:"listen-peer-urls"`
ListenClientUrls string `json:"listen-client-urls"`
ListenClientHttpUrls string `json:"listen-client-http-urls"`
AdvertiseClientUrls string `json:"advertise-client-urls"`
LPUrls string `json:"listen-peer-urls"`
LCUrls string `json:"listen-client-urls"`
AcurlsCfgFile string `json:"advertise-client-urls"`
}{
"testdir",
10,
@ -69,7 +67,6 @@ func TestConfigFileMemberFields(t *testing.T) {
10,
"http://localhost:8000,https://localhost:8001",
"http://localhost:7000,https://localhost:7001",
"http://localhost:7002,https://localhost:7003",
"http://localhost:7000,https://localhost:7001",
}
@ -517,9 +514,8 @@ func mustCreateCfgFile(t *testing.T, b []byte) *os.File {
func validateMemberFlags(t *testing.T, cfg *config) {
wcfg := &embed.Config{
Dir: "testdir",
ListenPeerUrls: []url.URL{{Scheme: "http", Host: "localhost:8000"}, {Scheme: "https", Host: "localhost:8001"}},
ListenClientUrls: []url.URL{{Scheme: "http", Host: "localhost:7000"}, {Scheme: "https", Host: "localhost:7001"}},
ListenClientHttpUrls: []url.URL{{Scheme: "http", Host: "localhost:7002"}, {Scheme: "https", Host: "localhost:7003"}},
LPUrls: []url.URL{{Scheme: "http", Host: "localhost:8000"}, {Scheme: "https", Host: "localhost:8001"}},
LCUrls: []url.URL{{Scheme: "http", Host: "localhost:7000"}, {Scheme: "https", Host: "localhost:7001"}},
MaxSnapFiles: 10,
MaxWalFiles: 10,
Name: "testname",
@ -541,21 +537,18 @@ func validateMemberFlags(t *testing.T, cfg *config) {
if cfg.ec.SnapshotCount != wcfg.SnapshotCount {
t.Errorf("snapcount = %v, want %v", cfg.ec.SnapshotCount, wcfg.SnapshotCount)
}
if !reflect.DeepEqual(cfg.ec.ListenPeerUrls, wcfg.ListenPeerUrls) {
t.Errorf("listen-peer-urls = %v, want %v", cfg.ec.ListenPeerUrls, wcfg.ListenPeerUrls)
if !reflect.DeepEqual(cfg.ec.LPUrls, wcfg.LPUrls) {
t.Errorf("listen-peer-urls = %v, want %v", cfg.ec.LPUrls, wcfg.LPUrls)
}
if !reflect.DeepEqual(cfg.ec.ListenClientUrls, wcfg.ListenClientUrls) {
t.Errorf("listen-client-urls = %v, want %v", cfg.ec.ListenClientUrls, wcfg.ListenClientUrls)
}
if !reflect.DeepEqual(cfg.ec.ListenClientHttpUrls, wcfg.ListenClientHttpUrls) {
t.Errorf("listen-client-http-urls = %v, want %v", cfg.ec.ListenClientHttpUrls, wcfg.ListenClientHttpUrls)
if !reflect.DeepEqual(cfg.ec.LCUrls, wcfg.LCUrls) {
t.Errorf("listen-client-urls = %v, want %v", cfg.ec.LCUrls, wcfg.LCUrls)
}
}
func validateClusteringFlags(t *testing.T, cfg *config) {
wcfg := newConfig()
wcfg.ec.AdvertisePeerUrls = []url.URL{{Scheme: "http", Host: "localhost:8000"}, {Scheme: "https", Host: "localhost:8001"}}
wcfg.ec.AdvertiseClientUrls = []url.URL{{Scheme: "http", Host: "localhost:7000"}, {Scheme: "https", Host: "localhost:7001"}}
wcfg.ec.APUrls = []url.URL{{Scheme: "http", Host: "localhost:8000"}, {Scheme: "https", Host: "localhost:8001"}}
wcfg.ec.ACUrls = []url.URL{{Scheme: "http", Host: "localhost:7000"}, {Scheme: "https", Host: "localhost:7001"}}
wcfg.ec.ClusterState = embed.ClusterStateFlagExisting
wcfg.cf.fallback.Set(fallbackFlagExit)
wcfg.ec.InitialCluster = "0=http://localhost:8000"
@ -573,11 +566,11 @@ func validateClusteringFlags(t *testing.T, cfg *config) {
if cfg.ec.InitialClusterToken != wcfg.ec.InitialClusterToken {
t.Errorf("initialClusterToken = %v, want %v", cfg.ec.InitialClusterToken, wcfg.ec.InitialClusterToken)
}
if !reflect.DeepEqual(cfg.ec.AdvertisePeerUrls, wcfg.ec.AdvertisePeerUrls) {
t.Errorf("initial-advertise-peer-urls = %v, want %v", cfg.ec.AdvertisePeerUrls, wcfg.ec.AdvertisePeerUrls)
if !reflect.DeepEqual(cfg.ec.APUrls, wcfg.ec.APUrls) {
t.Errorf("initial-advertise-peer-urls = %v, want %v", cfg.ec.APUrls, wcfg.ec.APUrls)
}
if !reflect.DeepEqual(cfg.ec.AdvertiseClientUrls, wcfg.ec.AdvertiseClientUrls) {
t.Errorf("advertise-client-urls = %v, want %v", cfg.ec.AdvertiseClientUrls, wcfg.ec.AdvertiseClientUrls)
if !reflect.DeepEqual(cfg.ec.ACUrls, wcfg.ec.ACUrls) {
t.Errorf("advertise-client-urls = %v, want %v", cfg.ec.ACUrls, wcfg.ec.ACUrls)
}
}

View File

@ -251,7 +251,7 @@ func startEtcdOrProxyV2() {
plog.Infof("forgot to set --initial-cluster flag?")
}
}
if types.URLs(cfg.ec.AdvertisePeerUrls).String() == embed.DefaultInitialAdvertisePeerURLs {
if types.URLs(cfg.ec.APUrls).String() == embed.DefaultInitialAdvertisePeerURLs {
if lg != nil {
lg.Warn("forgot to set --initial-advertise-peer-urls?")
} else {
@ -507,11 +507,11 @@ func startProxy(cfg *config) error {
// setup self signed certs when serving https
cHosts, cTLS := []string{}, false
for _, u := range cfg.ec.ListenClientUrls {
for _, u := range cfg.ec.LCUrls {
cHosts = append(cHosts, u.Host)
cTLS = cTLS || u.Scheme == "https"
}
for _, u := range cfg.ec.AdvertiseClientUrls {
for _, u := range cfg.ec.ACUrls {
cHosts = append(cHosts, u.Host)
cTLS = cTLS || u.Scheme == "https"
}
@ -528,7 +528,7 @@ func startProxy(cfg *config) error {
}
// Start a proxy server goroutine for each listen address
for _, u := range cfg.ec.ListenClientUrls {
for _, u := range cfg.ec.LCUrls {
l, err := transport.NewListener(u.Host, u.Scheme, &listenerTLS)
if err != nil {
return err

View File

@ -38,7 +38,6 @@ import (
pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
"go.etcd.io/etcd/pkg/debugutil"
"go.etcd.io/etcd/pkg/logutil"
"go.etcd.io/etcd/pkg/tlsutil"
"go.etcd.io/etcd/pkg/transport"
"go.etcd.io/etcd/proxy/grpcproxy"
@ -46,7 +45,6 @@ import (
"github.com/soheilhy/cmux"
"github.com/spf13/cobra"
"go.uber.org/zap"
"golang.org/x/net/http2"
"google.golang.org/grpc"
"google.golang.org/grpc/grpclog"
)
@ -74,7 +72,6 @@ var (
grpcProxyListenCA string
grpcProxyListenCert string
grpcProxyListenKey string
grpcProxyListenCipherSuites []string
grpcProxyListenAutoTLS bool
grpcProxyListenCRL string
@ -89,8 +86,6 @@ var (
grpcProxyEnableOrdering bool
grpcProxyDebug bool
maxConcurrentStreams uint32
)
const defaultGRPCMaxCallSendMsgSize = 1.5 * 1024 * 1024
@ -142,7 +137,6 @@ func newGRPCProxyStartCommand() *cobra.Command {
cmd.Flags().StringVar(&grpcProxyListenCert, "cert-file", "", "identify secure connections to the proxy using this TLS certificate file")
cmd.Flags().StringVar(&grpcProxyListenKey, "key-file", "", "identify secure connections to the proxy using this TLS key file")
cmd.Flags().StringVar(&grpcProxyListenCA, "trusted-ca-file", "", "verify certificates of TLS-enabled secure proxy using this CA bundle")
cmd.Flags().StringSliceVar(&grpcProxyListenCipherSuites, "listen-cipher-suites", grpcProxyListenCipherSuites, "Comma-separated list of supported TLS cipher suites between client/proxy (empty will be auto-populated by Go).")
cmd.Flags().BoolVar(&grpcProxyListenAutoTLS, "auto-tls", false, "proxy TLS using generated certificates")
cmd.Flags().StringVar(&grpcProxyListenCRL, "client-crl-file", "", "proxy client certificate revocation list file.")
@ -152,8 +146,6 @@ func newGRPCProxyStartCommand() *cobra.Command {
cmd.Flags().BoolVar(&grpcProxyDebug, "debug", false, "Enable debug-level logging for grpc-proxy.")
cmd.Flags().Uint32Var(&maxConcurrentStreams, "max-concurrent-streams", math.MaxUint32, "Maximum concurrent streams that each client can open at a time.")
return &cmd
}
@ -179,27 +171,20 @@ func startGRPCProxy(cmd *cobra.Command, args []string) {
}
grpclog.SetLoggerV2(gl)
tlsInfo := newTLS(grpcProxyListenCA, grpcProxyListenCert, grpcProxyListenKey)
if len(grpcProxyListenCipherSuites) > 0 {
cs, err := tlsutil.GetCipherSuites(grpcProxyListenCipherSuites)
if err != nil {
log.Fatal(err)
}
tlsInfo.CipherSuites = cs
}
if tlsInfo == nil && grpcProxyListenAutoTLS {
tlsinfo := newTLS(grpcProxyListenCA, grpcProxyListenCert, grpcProxyListenKey)
if tlsinfo == nil && grpcProxyListenAutoTLS {
host := []string{"https://" + grpcProxyListenAddr}
dir := filepath.Join(grpcProxyDataDir, "fixtures", "proxy")
autoTLS, err := transport.SelfCert(lg, dir, host)
if err != nil {
log.Fatal(err)
}
tlsInfo = &autoTLS
tlsinfo = &autoTLS
}
if tlsInfo != nil {
lg.Info("gRPC proxy server TLS", zap.String("tls-info", fmt.Sprintf("%+v", tlsInfo)))
if tlsinfo != nil {
lg.Info("gRPC proxy server TLS", zap.String("tls-info", fmt.Sprintf("%+v", tlsinfo)))
}
m := mustListenCMux(lg, tlsInfo)
m := mustListenCMux(lg, tlsinfo)
grpcl := m.Match(cmux.HTTP2())
defer func() {
grpcl.Close()
@ -209,20 +194,13 @@ func startGRPCProxy(cmd *cobra.Command, args []string) {
client := mustNewClient(lg)
httpClient := mustNewHTTPClient(lg)
srvhttp, httpl := mustHTTPListener(lg, m, tlsInfo, client)
if err := http2.ConfigureServer(srvhttp, &http2.Server{
MaxConcurrentStreams: maxConcurrentStreams,
}); err != nil {
lg.Fatal("Failed to configure the http server", zap.Error(err))
}
srvhttp, httpl := mustHTTPListener(lg, m, tlsinfo, client)
errc := make(chan error)
go func() { errc <- newGRPCProxyServer(lg, client).Serve(grpcl) }()
go func() { errc <- srvhttp.Serve(httpl) }()
go func() { errc <- m.Serve() }()
if len(grpcProxyMetricsListenAddr) > 0 {
mhttpl := mustMetricsListener(lg, tlsInfo)
mhttpl := mustMetricsListener(lg, tlsinfo)
go func() {
mux := http.NewServeMux()
grpcproxy.HandleMetrics(mux, httpClient, client.Endpoints())
@ -348,7 +326,7 @@ func mustListenCMux(lg *zap.Logger, tlsinfo *transport.TLSInfo) cmux.CMux {
func newGRPCProxyServer(lg *zap.Logger, client *clientv3.Client) *grpc.Server {
if grpcProxyEnableOrdering {
vf := ordering.NewOrderViolationSwitchEndpointClosure(client)
vf := ordering.NewOrderViolationSwitchEndpointClosure(*client)
client.KV = ordering.NewKV(client.KV, vf)
lg.Info("waiting for linearized read from cluster to recover ordering")
for {
@ -372,12 +350,12 @@ func newGRPCProxyServer(lg *zap.Logger, client *clientv3.Client) *grpc.Server {
}
kvp, _ := grpcproxy.NewKvProxy(client)
watchp, _ := grpcproxy.NewWatchProxy(client.Ctx(), client)
watchp, _ := grpcproxy.NewWatchProxy(client)
if grpcProxyResolverPrefix != "" {
grpcproxy.Register(client, grpcProxyResolverPrefix, grpcProxyAdvertiseClientURL, grpcProxyResolverTTL)
}
clusterp, _ := grpcproxy.NewClusterProxy(client, grpcProxyAdvertiseClientURL, grpcProxyResolverPrefix)
leasep, _ := grpcproxy.NewLeaseProxy(client.Ctx(), client)
leasep, _ := grpcproxy.NewLeaseProxy(client)
mainp := grpcproxy.NewMaintenanceProxy(client)
authp := grpcproxy.NewAuthProxy(client)
electionp := grpcproxy.NewElectionProxy(client)

View File

@ -62,9 +62,7 @@ Member:
--listen-peer-urls 'http://localhost:2380'
List of URLs to listen on for peer traffic.
--listen-client-urls 'http://localhost:2379'
List of URLs to listen on for client grpc traffic and http as long as --listen-client-http-urls is not specified.
--listen-client-http-urls ''
List of URLs to listen on for http only client traffic. Enabling this flag removes http services from --listen-client-urls.
List of URLs to listen on for client traffic.
--max-snapshots '` + strconv.Itoa(embed.DefaultMaxSnapshots) + `'
Maximum number of snapshot files to retain (0 is unlimited).
--max-wals '` + strconv.Itoa(embed.DefaultMaxWALs) + `'
@ -79,8 +77,6 @@ Member:
Maximum number of operations permitted in a transaction.
--max-request-bytes '1572864'
Maximum client request size in bytes the server will accept.
--max-concurrent-streams 'math.MaxUint32'
Maximum concurrent streams that each client can open at a time.
--grpc-keepalive-min-time '5s'
Minimum duration interval that a client should wait before pinging server.
--grpc-keepalive-interval '2h'
@ -94,8 +90,7 @@ Clustering:
--initial-cluster 'default=http://localhost:2380'
Initial cluster configuration for bootstrapping.
--initial-cluster-state 'new'
Initial cluster state ('new' when bootstrapping a new cluster or 'existing' when adding new members to an existing cluster).
After successful initialization (bootstrapping or adding), flag is ignored on restarts.
Initial cluster state ('new' or 'existing').
--initial-cluster-token 'etcd-cluster'
Initial cluster token for the etcd cluster during bootstrap.
Specifying this can protect you from unintended cross-cluster interaction when running multiple clusters.
@ -161,10 +156,6 @@ Security:
Comma-separated whitelist of origins for CORS, or cross-origin resource sharing (empty or * means allow all).
--host-whitelist '*'
Acceptable hostnames from HTTP client requests, if server is not secure (empty or * means allow all).
--tls-min-version 'TLS1.2'
Minimum TLS version supported by etcd. Possible values: TLS1.2, TLS1.3.
--tls-max-version ''
Maximum TLS version supported by etcd. Possible values: TLS1.2, TLS1.3 (empty will be auto-populated by Go).
Auth:
--auth-token 'simple'
@ -221,8 +212,6 @@ Experimental feature:
Skip verification of SAN field in client certificate for peer connections.
--experimental-watch-progress-notify-interval '10m'
Duration of periodic watch progress notifications.
--experimental-warning-apply-duration '100ms'
Warning is generated if requests take more than this duration.
Unsafe feature:
--force-new-cluster 'false'

View File

@ -36,7 +36,7 @@ const (
// HandleMetricsHealth registers metrics and health handlers.
func HandleMetricsHealth(mux *http.ServeMux, srv etcdserver.ServerV2) {
mux.Handle(PathMetrics, promhttp.Handler())
mux.Handle(PathHealth, NewHealthHandler(func(excludedAlarms AlarmSet) Health { return checkHealth(srv, excludedAlarms) }))
mux.Handle(PathHealth, NewHealthHandler(func() Health { return checkHealth(srv) }))
}
// HandlePrometheus registers prometheus handler on '/metrics'.
@ -45,7 +45,7 @@ func HandlePrometheus(mux *http.ServeMux) {
}
// NewHealthHandler handles '/health' requests.
func NewHealthHandler(hfunc func(excludedAlarms AlarmSet) Health) http.HandlerFunc {
func NewHealthHandler(hfunc func() Health) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodGet {
w.Header().Set("Allow", http.MethodGet)
@ -53,8 +53,7 @@ func NewHealthHandler(hfunc func(excludedAlarms AlarmSet) Health) http.HandlerFu
plog.Warningf("/health error (status code %d)", http.StatusMethodNotAllowed)
return
}
excludedAlarms := getExcludedAlarms(r)
h := hfunc(excludedAlarms)
h := hfunc()
d, _ := json.Marshal(h)
if h.Health != "true" {
http.Error(w, string(d), http.StatusServiceUnavailable)
@ -91,38 +90,16 @@ type Health struct {
Health string `json:"health"`
}
type AlarmSet map[string]struct{}
func getExcludedAlarms(r *http.Request) (alarms AlarmSet) {
alarms = make(map[string]struct{}, 2)
alms, found := r.URL.Query()["exclude"]
if found {
for _, alm := range alms {
if len(alm) == 0 {
continue
}
alarms[alm] = struct{}{}
}
}
return alarms
}
// TODO: server NOSPACE, etcdserver.ErrNoLeader in health API
func checkHealth(srv etcdserver.ServerV2, excludedAlarms AlarmSet) Health {
func checkHealth(srv etcdserver.ServerV2) Health {
h := Health{Health: "true"}
as := srv.Alarms()
if len(as) > 0 {
for _, v := range as {
alarmName := v.Alarm.String()
if _, found := excludedAlarms[alarmName]; found {
plog.Debugf("/health excluded alarm %s", v.String())
continue
}
h.Health = "false"
plog.Warningf("/health error due to %s", v.String())
return h
for _, v := range as {
plog.Warningf("/health error due to an alarm %s", v.String())
}
}
@ -145,7 +122,7 @@ func checkHealth(srv etcdserver.ServerV2, excludedAlarms AlarmSet) Health {
if h.Health == "true" {
healthSuccess.Inc()
plog.Debugf("/health OK (status code %d)", http.StatusOK)
plog.Infof("/health OK (status code %d)", http.StatusOK)
} else {
healthFailed.Inc()
}
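The exclude handling removed above lets callers tolerate specific alarms when probing /health. A hedged usage sketch against an assumed local endpoint:

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// The exclude parameter may repeat: /health?exclude=NOSPACE&exclude=CORRUPT.
	resp, err := http.Get("http://127.0.0.1:2379/health?exclude=NOSPACE")
	if err != nil {
		fmt.Println("health check failed:", err)
		return
	}
	defer resp.Body.Close()
	var h struct {
		Health string `json:"health"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&h); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Println("status:", resp.StatusCode, "health:", h.Health)
}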

View File

@ -1,157 +0,0 @@
// Copyright 2021 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package etcdhttp
import (
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"testing"
"go.etcd.io/etcd/etcdserver"
stats "go.etcd.io/etcd/etcdserver/api/v2stats"
pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
"go.etcd.io/etcd/pkg/testutil"
"go.etcd.io/etcd/pkg/types"
"go.etcd.io/etcd/raft"
)
type fakeStats struct{}
func (s *fakeStats) SelfStats() []byte { return nil }
func (s *fakeStats) LeaderStats() []byte { return nil }
func (s *fakeStats) StoreStats() []byte { return nil }
type fakeServerV2 struct {
fakeServer
stats.Stats
health string
}
func (s *fakeServerV2) Leader() types.ID {
if s.health == "true" {
return 1
}
return types.ID(raft.None)
}
func (s *fakeServerV2) Do(ctx context.Context, r pb.Request) (etcdserver.Response, error) {
if s.health == "true" {
return etcdserver.Response{}, nil
}
return etcdserver.Response{}, fmt.Errorf("fail health check")
}
func (s *fakeServerV2) ClientCertAuthEnabled() bool { return false }
func TestHealthHandler(t *testing.T) {
// define the input and expected output
// input: alarms, and healthCheckURL
tests := []struct {
alarms []*pb.AlarmMember
healthCheckURL string
statusCode int
health string
}{
{
[]*pb.AlarmMember{},
"/health",
http.StatusOK,
"true",
},
{
[]*pb.AlarmMember{{MemberID: uint64(0), Alarm: pb.AlarmType_NOSPACE}},
"/health",
http.StatusServiceUnavailable,
"false",
},
{
[]*pb.AlarmMember{{MemberID: uint64(0), Alarm: pb.AlarmType_NOSPACE}},
"/health?exclude=NOSPACE",
http.StatusOK,
"true",
},
{
[]*pb.AlarmMember{},
"/health?exclude=NOSPACE",
http.StatusOK,
"true",
},
{
[]*pb.AlarmMember{{MemberID: uint64(1), Alarm: pb.AlarmType_NOSPACE}, {MemberID: uint64(2), Alarm: pb.AlarmType_NOSPACE}, {MemberID: uint64(3), Alarm: pb.AlarmType_NOSPACE}},
"/health?exclude=NOSPACE",
http.StatusOK,
"true",
},
{
[]*pb.AlarmMember{{MemberID: uint64(0), Alarm: pb.AlarmType_NOSPACE}, {MemberID: uint64(1), Alarm: pb.AlarmType_CORRUPT}},
"/health?exclude=NOSPACE",
http.StatusServiceUnavailable,
"false",
},
{
[]*pb.AlarmMember{{MemberID: uint64(0), Alarm: pb.AlarmType_NOSPACE}, {MemberID: uint64(1), Alarm: pb.AlarmType_CORRUPT}},
"/health?exclude=NOSPACE&exclude=CORRUPT",
http.StatusOK,
"true",
},
}
for i, tt := range tests {
func() {
mux := http.NewServeMux()
HandleMetricsHealth(mux, &fakeServerV2{
fakeServer: fakeServer{alarms: tt.alarms},
Stats: &fakeStats{},
health: tt.health,
})
ts := httptest.NewServer(mux)
defer ts.Close()
res, err := ts.Client().Do(&http.Request{Method: http.MethodGet, URL: testutil.MustNewURL(t, ts.URL+tt.healthCheckURL)})
if err != nil {
t.Errorf("fail serve http request %s %v in test case #%d", tt.healthCheckURL, err, i+1)
}
if res == nil {
t.Errorf("got nil http response with http request %s in test case #%d", tt.healthCheckURL, i+1)
return
}
if res.StatusCode != tt.statusCode {
t.Errorf("want statusCode %d but got %d in test case #%d", tt.statusCode, res.StatusCode, i+1)
}
health, err := parseHealthOutput(res.Body)
if err != nil {
t.Errorf("fail parse health check output %v", err)
}
if health.Health != tt.health {
t.Errorf("want health %s but got %s", tt.health, health.Health)
}
}()
}
}
func parseHealthOutput(body io.Reader) (Health, error) {
obj := Health{}
d, derr := ioutil.ReadAll(body)
if derr != nil {
return obj, derr
}
if err := json.Unmarshal(d, &obj); err != nil {
return obj, err
}
return obj, nil
}

View File

@ -58,7 +58,6 @@ func (c *fakeCluster) Version() *semver.Version { return nil }
type fakeServer struct {
cluster api.Cluster
alarms []*pb.AlarmMember
}
func (s *fakeServer) AddMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) {
@ -75,7 +74,7 @@ func (s *fakeServer) PromoteMember(ctx context.Context, id uint64) ([]*membershi
}
func (s *fakeServer) ClusterVersion() *semver.Version { return nil }
func (s *fakeServer) Cluster() api.Cluster { return s.cluster }
func (s *fakeServer) Alarms() []*pb.AlarmMember { return s.alarms }
func (s *fakeServer) Alarms() []*pb.AlarmMember { return nil }
var fakeRaftHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte("test data"))

View File

@ -763,21 +763,16 @@ func ValidateClusterAndAssignIDs(lg *zap.Logger, local *RaftCluster, existing *R
if len(ems) != len(lms) {
return fmt.Errorf("member count is unequal")
}
sort.Sort(MembersByPeerURLs(ems))
sort.Sort(MembersByPeerURLs(lms))
ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second)
defer cancel()
for i := range ems {
var err error
ok := false
for j := range lms {
if ok, err = netutil.URLStringsEqual(ctx, lg, ems[i].PeerURLs, lms[j].PeerURLs); ok {
lms[j].ID = ems[i].ID
break
}
}
if !ok {
return fmt.Errorf("PeerURLs: no match found for existing member (%v, %v), last resolver error (%v)", ems[i].ID, ems[i].PeerURLs, err)
if ok, err := netutil.URLStringsEqual(ctx, lg, ems[i].PeerURLs, lms[i].PeerURLs); !ok {
return fmt.Errorf("unmatched member while checking PeerURLs (%v)", err)
}
lms[i].ID = ems[i].ID
}
local.members = make(map[types.ID]*Member)
for _, m := range lms {

View File

@ -2,14 +2,12 @@
// source: snap.proto
/*
Package snappb is a generated protocol buffer package.
It is generated from these files:
snap.proto
It has these top-level messages:
Snapshot
*/
package snappb

View File

@ -104,5 +104,5 @@ func TestNodeExternClone(t *testing.T) {
func sameSlice(a, b []*NodeExtern) bool {
ah := (*reflect.SliceHeader)(unsafe.Pointer(&a))
bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
return ah.Data == bh.Data && ah.Len == bh.Len && ah.Cap == bh.Cap
return *ah == *bh
}

View File

@ -98,7 +98,7 @@ func TestStoreStatsDeleteFail(t *testing.T) {
testutil.AssertEqual(t, uint64(1), s.Stats.DeleteFail, "")
}
// Ensure that the number of expirations is recorded in the stats.
//Ensure that the number of expirations is recorded in the stats.
func TestStoreStatsExpireCount(t *testing.T) {
s := newStore()
fc := newFakeClock()

View File

@ -844,7 +844,7 @@ func TestStoreWatchSlowConsumer(t *testing.T) {
s.Watch("/foo", true, true, 0) // stream must be true
// Fill watch channel with 100 events
for i := 1; i <= 100; i++ {
s.Set("/foo", false, string(rune(i)), v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) // ok
s.Set("/foo", false, string(i), v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) // ok
}
// testutil.AssertEqual(t, s.WatcherHub.count, int64(1))
s.Set("/foo", false, "101", v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) // ok
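On the string(rune(i)) change above: converting an integer straight to string yields the UTF-8 encoding of that code point, and newer go vet flags the conversion unless it goes through rune explicitly. Behavior is unchanged:

package main

import "fmt"

func main() {
	fmt.Println(string(rune(65)))   // "A": explicit code-point conversion
	fmt.Println(string(rune(9731))) // the snowman rune, same output string(9731) produced
}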

View File

@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !v2v3
// +build !v2v3
package v2store_test

View File

@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build v2v3
// +build v2v3
package v2store_test

View File

@ -41,4 +41,5 @@
// if err != nil {
// // handle error!
// }
//
package v3client

View File

@ -2,14 +2,12 @@
// source: v3election.proto
/*
Package v3electionpb is a generated protocol buffer package.
It is generated from these files:
v3election.proto
It has these top-level messages:
CampaignRequest
CampaignResponse
LeaderKey

View File

@ -2,14 +2,12 @@
// source: v3lock.proto
/*
Package v3lockpb is a generated protocol buffer package.
It is generated from these files:
v3lock.proto
It has these top-level messages:
LockRequest
LockResponse
UnlockRequest

View File

@ -31,6 +31,7 @@ import (
const (
grpcOverheadBytes = 512 * 1024
maxStreams = math.MaxUint32
maxSendBytes = math.MaxInt32
)
@ -52,7 +53,7 @@ func Server(s *etcdserver.EtcdServer, tls *tls.Config, gopts ...grpc.ServerOptio
)))
opts = append(opts, grpc.MaxRecvMsgSize(int(s.Cfg.MaxRequestBytes+grpcOverheadBytes)))
opts = append(opts, grpc.MaxSendMsgSize(maxSendBytes))
opts = append(opts, grpc.MaxConcurrentStreams(s.Cfg.MaxConcurrentStreams))
opts = append(opts, grpc.MaxConcurrentStreams(maxStreams))
grpcServer := grpc.NewServer(append(opts, gopts...)...)
pb.RegisterKVServer(grpcServer, NewQuotaKVServer(s))
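The option block above sizes the gRPC server. A standalone sketch of the same knobs; the request-size value is an assumption matching the documented 1572864-byte default:

package main

import (
	"math"

	"google.golang.org/grpc"
)

func main() {
	const grpcOverheadBytes = 512 * 1024
	const maxRequestBytes = 1572864 // assumed default (--max-request-bytes)
	gs := grpc.NewServer(
		grpc.MaxRecvMsgSize(maxRequestBytes+grpcOverheadBytes), // requests plus headroom
		grpc.MaxSendMsgSize(math.MaxInt32),                     // effectively unbounded responses
		grpc.MaxConcurrentStreams(math.MaxUint32),
	)
	defer gs.Stop()
}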

View File

@ -217,8 +217,8 @@ func newStreamInterceptor(s *etcdserver.EtcdServer) grpc.StreamServerInterceptor
return rpctypes.ErrGRPCNoLeader
}
ctx := newCancellableContext(ss.Context())
ss = serverStreamWithCtx{ctx: ctx, ServerStream: ss}
cctx, cancel := context.WithCancel(ss.Context())
ss = serverStreamWithCtx{ctx: cctx, cancel: &cancel, ServerStream: ss}
smap.mu.Lock()
smap.streams[ss] = struct{}{}
@ -228,8 +228,7 @@ func newStreamInterceptor(s *etcdserver.EtcdServer) grpc.StreamServerInterceptor
smap.mu.Lock()
delete(smap.streams, ss)
smap.mu.Unlock()
// TODO: investigate whether the reason for cancellation here is useful to know
ctx.Cancel(nil)
cancel()
}()
}
}
@ -238,52 +237,10 @@ func newStreamInterceptor(s *etcdserver.EtcdServer) grpc.StreamServerInterceptor
}
}
// cancellableContext wraps a context with new cancellable context that allows a
// specific cancellation error to be preserved and later retrieved using the
// Context.Err() function. This is so downstream context users can disambiguate
// the reason for the cancellation which could be from the client (for example)
// or from this interceptor code.
type cancellableContext struct {
context.Context
lock sync.RWMutex
cancel context.CancelFunc
cancelReason error
}
func newCancellableContext(parent context.Context) *cancellableContext {
ctx, cancel := context.WithCancel(parent)
return &cancellableContext{
Context: ctx,
cancel: cancel,
}
}
// Cancel stores the cancellation reason and then delegates to context.WithCancel
// against the parent context.
func (c *cancellableContext) Cancel(reason error) {
c.lock.Lock()
c.cancelReason = reason
c.lock.Unlock()
c.cancel()
}
// Err will return the preserved cancel reason error if present, and will
// otherwise return the underlying error from the parent context.
func (c *cancellableContext) Err() error {
c.lock.RLock()
defer c.lock.RUnlock()
if c.cancelReason != nil {
return c.cancelReason
}
return c.Context.Err()
}
type serverStreamWithCtx struct {
grpc.ServerStream
// ctx is used so that we can preserve a reason for cancellation.
ctx *cancellableContext
ctx context.Context
cancel *context.CancelFunc
}
func (ssc serverStreamWithCtx) Context() context.Context { return ssc.ctx }
@ -315,7 +272,7 @@ func monitorLeader(s *etcdserver.EtcdServer) *streamsMap {
smap.mu.Lock()
for ss := range smap.streams {
if ssWithCtx, ok := ss.(serverStreamWithCtx); ok {
ssWithCtx.ctx.Cancel(rpctypes.ErrGRPCNoLeader)
(*ssWithCtx.cancel)()
<-ss.Context().Done()
}
}
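The cancellableContext being removed above exists so the watch path can report why a stream died instead of a generic context.Canceled. A miniature, self-contained rendition of the pattern; the names here are mine, not etcd's:

package main

import (
	"context"
	"errors"
	"fmt"
	"sync"
)

var errNoLeader = errors.New("no leader") // stand-in for rpctypes.ErrGRPCNoLeader

type reasonContext struct {
	context.Context
	mu     sync.Mutex
	cancel context.CancelFunc
	reason error
}

func newReasonContext(parent context.Context) *reasonContext {
	ctx, cancel := context.WithCancel(parent)
	return &reasonContext{Context: ctx, cancel: cancel}
}

// Cancel records why the context died, then cancels it.
func (c *reasonContext) Cancel(reason error) {
	c.mu.Lock()
	c.reason = reason
	c.mu.Unlock()
	c.cancel()
}

// Err prefers the recorded reason over the parent's generic error.
func (c *reasonContext) Err() error {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.reason != nil {
		return c.reason
	}
	return c.Context.Err()
}

func main() {
	ctx := newReasonContext(context.Background())
	ctx.Cancel(errNoLeader)
	<-ctx.Done()
	fmt.Println(ctx.Err()) // "no leader" rather than "context canceled"
}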

View File

@ -35,8 +35,6 @@ var (
ErrGRPCLeaseExist = status.New(codes.FailedPrecondition, "etcdserver: lease already exists").Err()
ErrGRPCLeaseTTLTooLarge = status.New(codes.OutOfRange, "etcdserver: too large lease TTL").Err()
ErrGRPCWatchCanceled = status.New(codes.Canceled, "etcdserver: watch canceled").Err()
ErrGRPCMemberExist = status.New(codes.FailedPrecondition, "etcdserver: member ID already exist").Err()
ErrGRPCPeerURLExist = status.New(codes.FailedPrecondition, "etcdserver: Peer URLs already exists").Err()
ErrGRPCMemberNotEnoughStarted = status.New(codes.FailedPrecondition, "etcdserver: re-configuration failed due to not enough started members").Err()
@ -58,14 +56,12 @@ var (
ErrGRPCRoleNotFound = status.New(codes.FailedPrecondition, "etcdserver: role name not found").Err()
ErrGRPCRoleEmpty = status.New(codes.InvalidArgument, "etcdserver: role name is empty").Err()
ErrGRPCAuthFailed = status.New(codes.InvalidArgument, "etcdserver: authentication failed, invalid user ID or password").Err()
ErrGRPCPermissionNotGiven = status.New(codes.InvalidArgument, "etcdserver: permission not given").Err()
ErrGRPCPermissionDenied = status.New(codes.PermissionDenied, "etcdserver: permission denied").Err()
ErrGRPCRoleNotGranted = status.New(codes.FailedPrecondition, "etcdserver: role is not granted to the user").Err()
ErrGRPCPermissionNotGranted = status.New(codes.FailedPrecondition, "etcdserver: permission is not granted to the role").Err()
ErrGRPCAuthNotEnabled = status.New(codes.FailedPrecondition, "etcdserver: authentication is not enabled").Err()
ErrGRPCInvalidAuthToken = status.New(codes.Unauthenticated, "etcdserver: invalid auth token").Err()
ErrGRPCInvalidAuthMgmt = status.New(codes.InvalidArgument, "etcdserver: invalid auth management").Err()
ErrGRPCAuthOldRevision = status.New(codes.InvalidArgument, "etcdserver: revision of auth store is old").Err()
ErrGRPCNoLeader = status.New(codes.Unavailable, "etcdserver: no leader").Err()
ErrGRPCNotLeader = status.New(codes.FailedPrecondition, "etcdserver: not leader").Err()
@ -75,7 +71,6 @@ var (
ErrGRPCTimeout = status.New(codes.Unavailable, "etcdserver: request timed out").Err()
ErrGRPCTimeoutDueToLeaderFail = status.New(codes.Unavailable, "etcdserver: request timed out, possibly due to previous leader failure").Err()
ErrGRPCTimeoutDueToConnectionLost = status.New(codes.Unavailable, "etcdserver: request timed out, possibly due to connection lost").Err()
ErrGRPCTimeoutWaitAppliedIndex = status.New(codes.Unavailable, "etcdserver: request timed out, waiting for the applied index took too long").Err()
ErrGRPCUnhealthy = status.New(codes.Unavailable, "etcdserver: unhealthy cluster").Err()
ErrGRPCCorrupt = status.New(codes.DataLoss, "etcdserver: corrupt cluster").Err()
ErrGPRCNotSupportedForLearner = status.New(codes.Unavailable, "etcdserver: rpc not supported for learner").Err()
@ -124,7 +119,6 @@ var (
ErrorDesc(ErrGRPCAuthNotEnabled): ErrGRPCAuthNotEnabled,
ErrorDesc(ErrGRPCInvalidAuthToken): ErrGRPCInvalidAuthToken,
ErrorDesc(ErrGRPCInvalidAuthMgmt): ErrGRPCInvalidAuthMgmt,
ErrorDesc(ErrGRPCAuthOldRevision): ErrGRPCAuthOldRevision,
ErrorDesc(ErrGRPCNoLeader): ErrGRPCNoLeader,
ErrorDesc(ErrGRPCNotLeader): ErrGRPCNotLeader,
@ -134,7 +128,6 @@ var (
ErrorDesc(ErrGRPCTimeout): ErrGRPCTimeout,
ErrorDesc(ErrGRPCTimeoutDueToLeaderFail): ErrGRPCTimeoutDueToLeaderFail,
ErrorDesc(ErrGRPCTimeoutDueToConnectionLost): ErrGRPCTimeoutDueToConnectionLost,
ErrorDesc(ErrGRPCTimeoutWaitAppliedIndex): ErrGRPCTimeoutWaitAppliedIndex,
ErrorDesc(ErrGRPCUnhealthy): ErrGRPCUnhealthy,
ErrorDesc(ErrGRPCCorrupt): ErrGRPCCorrupt,
ErrorDesc(ErrGPRCNotSupportedForLearner): ErrGPRCNotSupportedForLearner,
@ -184,7 +177,6 @@ var (
ErrPermissionNotGranted = Error(ErrGRPCPermissionNotGranted)
ErrAuthNotEnabled = Error(ErrGRPCAuthNotEnabled)
ErrInvalidAuthToken = Error(ErrGRPCInvalidAuthToken)
ErrAuthOldRevision = Error(ErrGRPCAuthOldRevision)
ErrInvalidAuthMgmt = Error(ErrGRPCInvalidAuthMgmt)
ErrNoLeader = Error(ErrGRPCNoLeader)
@ -195,7 +187,6 @@ var (
ErrTimeout = Error(ErrGRPCTimeout)
ErrTimeoutDueToLeaderFail = Error(ErrGRPCTimeoutDueToLeaderFail)
ErrTimeoutDueToConnectionLost = Error(ErrGRPCTimeoutDueToConnectionLost)
ErrTimeoutWaitAppliedIndex = Error(ErrGRPCTimeoutWaitAppliedIndex)
ErrUnhealthy = Error(ErrGRPCUnhealthy)
ErrCorrupt = Error(ErrGRPCCorrupt)
ErrBadLeaderTransferee = Error(ErrGRPCBadLeaderTransferee)

View File

@ -53,7 +53,6 @@ var toGRPCErrorMap = map[error]error{
etcdserver.ErrTimeout: rpctypes.ErrGRPCTimeout,
etcdserver.ErrTimeoutDueToLeaderFail: rpctypes.ErrGRPCTimeoutDueToLeaderFail,
etcdserver.ErrTimeoutDueToConnectionLost: rpctypes.ErrGRPCTimeoutDueToConnectionLost,
etcdserver.ErrTimeoutWaitAppliedIndex: rpctypes.ErrGRPCTimeoutWaitAppliedIndex,
etcdserver.ErrUnhealthy: rpctypes.ErrGRPCUnhealthy,
etcdserver.ErrKeyNotFound: rpctypes.ErrGRPCKeyNotFound,
etcdserver.ErrCorrupt: rpctypes.ErrGRPCCorrupt,
@ -72,14 +71,12 @@ var toGRPCErrorMap = map[error]error{
auth.ErrRoleNotFound: rpctypes.ErrGRPCRoleNotFound,
auth.ErrRoleEmpty: rpctypes.ErrGRPCRoleEmpty,
auth.ErrAuthFailed: rpctypes.ErrGRPCAuthFailed,
auth.ErrPermissionNotGiven: rpctypes.ErrGRPCPermissionNotGiven,
auth.ErrPermissionDenied: rpctypes.ErrGRPCPermissionDenied,
auth.ErrRoleNotGranted: rpctypes.ErrGRPCRoleNotGranted,
auth.ErrPermissionNotGranted: rpctypes.ErrGRPCPermissionNotGranted,
auth.ErrAuthNotEnabled: rpctypes.ErrGRPCAuthNotEnabled,
auth.ErrInvalidAuthToken: rpctypes.ErrGRPCInvalidAuthToken,
auth.ErrInvalidAuthMgmt: rpctypes.ErrGRPCInvalidAuthMgmt,
auth.ErrAuthOldRevision: rpctypes.ErrGRPCAuthOldRevision,
}
func togRPCError(err error) error {

View File

@ -16,14 +16,12 @@ package v3rpc
import (
"context"
"fmt"
"io"
"math/rand"
"sync"
"time"
"go.etcd.io/etcd/auth"
"go.etcd.io/etcd/clientv3"
"go.etcd.io/etcd/etcdserver"
"go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
@ -145,10 +143,6 @@ type serverWatchStream struct {
// records fragmented watch IDs
fragment map[mvcc.WatchID]bool
// indicates whether we have an outstanding global progress
// notification to send
deferredProgress bool
// closec indicates the stream is closed.
closec chan struct{}
@ -178,8 +172,6 @@ func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) {
prevKV: make(map[mvcc.WatchID]bool),
fragment: make(map[mvcc.WatchID]bool),
deferredProgress: false,
closec: make(chan struct{}),
}
@ -214,25 +206,15 @@ func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) {
}
}()
// TODO: There's a race here. When a stream is closed (e.g. due to a cancellation),
// the underlying error (e.g. a gRPC stream error) may be returned and handled
// through errc if the recv goroutine finishes before the send goroutine.
// When the recv goroutine wins, the stream error is retained. When recv loses
// the race, the underlying error is lost (unless the root error is propagated
// through Context.Err() which is not always the case (as callers have to decide
// to implement a custom context to do so). The stdlib context package builtins
// may be insufficient to carry semantically useful errors around and should be
// revisited.
select {
case err = <-errc:
if err == context.Canceled {
err = rpctypes.ErrGRPCWatchCanceled
}
close(sws.ctrlStream)
case <-stream.Context().Done():
err = stream.Context().Err()
// the only server-side cancellation is noleader for now.
if err == context.Canceled {
err = rpctypes.ErrGRPCWatchCanceled
err = rpctypes.ErrGRPCNoLeader
}
}
@ -240,16 +222,16 @@ func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) {
return err
}
func (sws *serverWatchStream) isWatchPermitted(wcr *pb.WatchCreateRequest) error {
func (sws *serverWatchStream) isWatchPermitted(wcr *pb.WatchCreateRequest) bool {
authInfo, err := sws.ag.AuthInfoFromCtx(sws.gRPCStream.Context())
if err != nil {
return err
return false
}
if authInfo == nil {
// if auth is enabled, IsRangePermitted() can cause an error
authInfo = &auth.AuthInfo{}
}
return sws.ag.AuthStore().IsRangePermitted(authInfo, wcr.Key, wcr.RangeEnd)
return sws.ag.AuthStore().IsRangePermitted(authInfo, wcr.Key, wcr.RangeEnd) == nil
}
func (sws *serverWatchStream) recvLoop() error {
@ -283,29 +265,13 @@ func (sws *serverWatchStream) recvLoop() error {
creq.RangeEnd = []byte{}
}
err := sws.isWatchPermitted(creq)
if err != nil {
var cancelReason string
switch err {
case auth.ErrInvalidAuthToken:
cancelReason = rpctypes.ErrGRPCInvalidAuthToken.Error()
case auth.ErrAuthOldRevision:
cancelReason = rpctypes.ErrGRPCAuthOldRevision.Error()
case auth.ErrUserEmpty:
cancelReason = rpctypes.ErrGRPCUserEmpty.Error()
default:
if err != auth.ErrPermissionDenied {
sws.lg.Error("unexpected error code", zap.Error(err))
}
cancelReason = rpctypes.ErrGRPCPermissionDenied.Error()
}
if !sws.isWatchPermitted(creq) {
wr := &pb.WatchResponse{
Header: sws.newResponseHeader(sws.watchStream.Rev()),
WatchId: clientv3.InvalidWatchID,
WatchId: creq.WatchId,
Canceled: true,
Created: true,
CancelReason: cancelReason,
CancelReason: rpctypes.ErrGRPCPermissionDenied.Error(),
}
select {
@ -336,10 +302,7 @@ func (sws *serverWatchStream) recvLoop() error {
sws.fragment[id] = true
}
sws.mu.Unlock()
} else {
id = clientv3.InvalidWatchID
}
wr := &pb.WatchResponse{
Header: sws.newResponseHeader(wsrev),
WatchId: int64(id),
@ -374,17 +337,11 @@ func (sws *serverWatchStream) recvLoop() error {
}
case *pb.WatchRequest_ProgressRequest:
if uv.ProgressRequest != nil {
sws.mu.Lock()
// Ignore if deferred progress notification is already in progress
if !sws.deferredProgress {
// Request progress for all watchers,
// force generation of a response
if !sws.watchStream.RequestProgressAll() {
sws.deferredProgress = true
sws.ctrlStream <- &pb.WatchResponse{
Header: sws.newResponseHeader(sws.watchStream.Rev()),
WatchId: -1, // response is not associated with any WatchId and will be broadcast to all watch channels
}
}
sws.mu.Unlock()
}
default:
// we probably should not shut down the entire stream when
// receiving an invalid command.
@ -433,7 +390,7 @@ func (sws *serverWatchStream) sendLoop() {
sws.mu.RUnlock()
for i := range evs {
events[i] = &evs[i]
if needPrevKV && !isCreateEvent(evs[i]) {
if needPrevKV {
opt := mvcc.RangeOptions{Rev: evs[i].Kv.ModRevision - 1}
r, err := sws.watchable.Range(evs[i].Kv.Key, nil, opt)
if err == nil && len(r.KVs) != 0 {
@ -451,16 +408,12 @@ func (sws *serverWatchStream) sendLoop() {
Canceled: canceled,
}
// Progress notifications can have WatchID -1
// if they announce on behalf of multiple watchers
if wresp.WatchID != clientv3.InvalidWatchID {
if _, okID := ids[wresp.WatchID]; !okID {
// buffer if id not yet announced
wrs := append(pending[wresp.WatchID], wr)
pending[wresp.WatchID] = wrs
continue
}
}
mvcc.ReportEventReceived(len(evs))
@ -498,11 +451,6 @@ func (sws *serverWatchStream) sendLoop() {
// elide next progress update if sent a key update
sws.progress[wresp.WatchID] = false
}
if sws.deferredProgress {
if sws.watchStream.RequestProgressAll() {
sws.deferredProgress = false
}
}
sws.mu.Unlock()
case c, ok := <-sws.ctrlStream:
@ -530,12 +478,7 @@ func (sws *serverWatchStream) sendLoop() {
// track id creation
wid := mvcc.WatchID(c.WatchId)
if !(!(c.Canceled && c.Created) || wid == clientv3.InvalidWatchID) {
panic(fmt.Sprintf("unexpected watchId: %d, wanted: %d, since both 'Canceled' and 'Created' are true", wid, clientv3.InvalidWatchID))
}
if c.Canceled && wid != clientv3.InvalidWatchID {
if c.Canceled {
delete(ids, wid)
continue
}
@ -581,10 +524,6 @@ func (sws *serverWatchStream) sendLoop() {
}
}
func isCreateEvent(e mvccpb.Event) bool {
return e.Type == mvccpb.PUT && e.Kv.CreateRevision == e.Kv.ModRevision
}
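isCreateEvent above keys off CreateRevision == ModRevision: the first write of a key has no previous value, so the prev_kv lookup can be skipped. A tiny illustration with hand-built revisions:

package main

import (
	"fmt"

	"go.etcd.io/etcd/mvcc/mvccpb"
)

func isCreate(e mvccpb.Event) bool {
	return e.Type == mvccpb.PUT && e.Kv.CreateRevision == e.Kv.ModRevision
}

func main() {
	created := mvccpb.Event{Type: mvccpb.PUT, Kv: &mvccpb.KeyValue{CreateRevision: 5, ModRevision: 5}}
	updated := mvccpb.Event{Type: mvccpb.PUT, Kv: &mvccpb.KeyValue{CreateRevision: 5, ModRevision: 9}}
	fmt.Println(isCreate(created), isCreate(updated)) // true false
}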
func sendFragments(
wr *pb.WatchResponse,
maxRequestBytes int,

View File

@ -33,6 +33,10 @@ import (
"go.uber.org/zap"
)
const (
warnApplyDuration = 100 * time.Millisecond
)
type applyResult struct {
resp proto.Message
err error
@ -111,7 +115,7 @@ func (s *EtcdServer) newApplierV3() applierV3 {
func (a *applierV3backend) Apply(r *pb.InternalRaftRequest) *applyResult {
ar := &applyResult{}
defer func(start time.Time) {
warnOfExpensiveRequest(a.s.getLogger(), a.s.Cfg.WarningApplyDuration, start, &pb.InternalRaftStringer{Request: r}, ar.resp, ar.err)
warnOfExpensiveRequest(a.s.getLogger(), start, &pb.InternalRaftStringer{Request: r}, ar.resp, ar.err)
if ar.err != nil {
warnOfFailedRequest(a.s.getLogger(), start, &pb.InternalRaftStringer{Request: r}, ar.resp, ar.err)
}
@ -181,7 +185,7 @@ func (a *applierV3backend) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (resp *pb.Pu
trace = traceutil.New("put",
a.s.getLogger(),
traceutil.Field{Key: "key", Value: string(p.Key)},
traceutil.Field{Key: "req_size", Value: p.Size()},
traceutil.Field{Key: "req_size", Value: proto.Size(p)},
)
val, leaseID := p.Value, lease.LeaseID(p.Lease)
if txn == nil {

View File

@ -176,26 +176,15 @@ func (aa *authApplierV3) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevoke
}
func (aa *authApplierV3) checkLeasePuts(leaseID lease.LeaseID) error {
l := aa.lessor.Lookup(leaseID)
if l != nil {
return aa.checkLeasePutsKeys(l)
}
return nil
}
func (aa *authApplierV3) checkLeasePutsKeys(l *lease.Lease) error {
// early return for most-common scenario of either disabled auth or admin user.
// IsAdminPermitted also checks whether auth is enabled
if err := aa.as.IsAdminPermitted(&aa.authInfo); err == nil {
return nil
}
for _, key := range l.Keys() {
lease := aa.lessor.Lookup(leaseID)
if lease != nil {
for _, key := range lease.Keys() {
if err := aa.as.IsPutPermitted(&aa.authInfo, []byte(key)); err != nil {
return err
}
}
}
return nil
}

View File

@ -1,115 +0,0 @@
// Copyright 2023 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package etcdserver
import (
"testing"
"time"
"go.etcd.io/etcd/auth"
"go.etcd.io/etcd/auth/authpb"
pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
"go.etcd.io/etcd/lease"
"golang.org/x/crypto/bcrypt"
betesting "go.etcd.io/etcd/mvcc/backend"
"github.com/stretchr/testify/assert"
"go.uber.org/zap/zaptest"
)
func TestCheckLeasePutsKeys(t *testing.T) {
lg := zaptest.NewLogger(t)
b, _ := betesting.NewDefaultTmpBackend()
defer b.Close()
simpleTokenTTLDefault := 300 * time.Second
tokenTypeSimple := "simple"
dummyIndexWaiter := func(index uint64) <-chan struct{} {
ch := make(chan struct{}, 1)
go func() {
ch <- struct{}{}
}()
return ch
}
tp, _ := auth.NewTokenProvider(zaptest.NewLogger(t), tokenTypeSimple, dummyIndexWaiter, simpleTokenTTLDefault)
as := auth.NewAuthStore(lg, b, tp, bcrypt.MinCost)
aa := authApplierV3{as: as}
assert.NoError(t, aa.checkLeasePutsKeys(lease.NewLease(lease.LeaseID(1), 3600)), "auth is disabled, should allow puts")
assert.NoError(t, enableAuthAndCreateRoot(aa.as), "error while enabling auth")
aa.authInfo = auth.AuthInfo{Username: "root"}
assert.NoError(t, aa.checkLeasePutsKeys(lease.NewLease(lease.LeaseID(1), 3600)), "auth is enabled, should allow puts for root")
l := lease.NewLease(lease.LeaseID(1), 3600)
l.SetLeaseItem(lease.LeaseItem{Key: "a"})
aa.authInfo = auth.AuthInfo{Username: "bob", Revision: 0}
assert.ErrorIs(t, aa.checkLeasePutsKeys(l), auth.ErrUserEmpty, "auth is enabled, should not allow bob, non existing at rev 0")
aa.authInfo = auth.AuthInfo{Username: "bob", Revision: 1}
assert.ErrorIs(t, aa.checkLeasePutsKeys(l), auth.ErrAuthOldRevision, "auth is enabled, old revision")
aa.authInfo = auth.AuthInfo{Username: "bob", Revision: aa.as.Revision()}
assert.ErrorIs(t, aa.checkLeasePutsKeys(l), auth.ErrPermissionDenied, "auth is enabled, bob does not have permissions, bob does not exist")
_, err := aa.as.UserAdd(&pb.AuthUserAddRequest{Name: "bob", Options: &authpb.UserAddOptions{NoPassword: true}})
assert.NoError(t, err, "bob should be added without error")
aa.authInfo = auth.AuthInfo{Username: "bob", Revision: aa.as.Revision()}
assert.ErrorIs(t, aa.checkLeasePutsKeys(l), auth.ErrPermissionDenied, "auth is enabled, bob exists yet does not have permissions")
// allow bob to access "a"
_, err = aa.as.RoleAdd(&pb.AuthRoleAddRequest{Name: "bobsrole"})
assert.NoError(t, err, "bobsrole should be added without error")
_, err = aa.as.RoleGrantPermission(&pb.AuthRoleGrantPermissionRequest{
Name: "bobsrole",
Perm: &authpb.Permission{
PermType: authpb.READWRITE,
Key: []byte("a"),
RangeEnd: nil,
},
})
assert.NoError(t, err, "bobsrole should be granted permissions without error")
_, err = aa.as.UserGrantRole(&pb.AuthUserGrantRoleRequest{
User: "bob",
Role: "bobsrole",
})
assert.NoError(t, err, "bob should be granted bobsrole without error")
aa.authInfo = auth.AuthInfo{Username: "bob", Revision: aa.as.Revision()}
assert.NoError(t, aa.checkLeasePutsKeys(l), "bob should be able to access key 'a'")
}
func enableAuthAndCreateRoot(as auth.AuthStore) error {
_, err := as.UserAdd(&pb.AuthUserAddRequest{
Name: "root",
Password: "root",
Options: &authpb.UserAddOptions{NoPassword: false}})
if err != nil {
return err
}
_, err = as.RoleAdd(&pb.AuthRoleAddRequest{Name: "root"})
if err != nil {
return err
}
_, err = as.UserGrantRole(&pb.AuthUserGrantRoleRequest{User: "root", Role: "root"})
if err != nil {
return err
}
return as.AuthEnable()
}

View File

@ -119,7 +119,7 @@ func (s *EtcdServer) applyV2Request(r *RequestV2) Response {
stringer: r,
alternative: func() string { return fmt.Sprintf("id:%d,method:%s,path:%s", r.ID, r.Method, r.Path) },
}
defer warnOfExpensiveRequest(s.getLogger(), s.Cfg.WarningApplyDuration, time.Now(), stringer, nil, nil)
defer warnOfExpensiveRequest(s.getLogger(), time.Now(), stringer, nil, nil)
switch r.Method {
case "POST":

View File

@ -119,12 +119,6 @@ type ServerConfig struct {
// MaxRequestBytes is the maximum request size to send over raft.
MaxRequestBytes uint
// MaxConcurrentStreams specifies the maximum number of concurrent
// streams that each client can open at a time.
MaxConcurrentStreams uint32
WarningApplyDuration time.Duration
StrictReconfigCheck bool
// ClientCertAuthEnabled is true when cert has been signed by the client CA.
@ -158,12 +152,10 @@ type ServerConfig struct {
ForceNewCluster bool
// EnableLeaseCheckpoint enables leader to send regular checkpoints to other members to prevent reset of remaining TTL on leader change.
// EnableLeaseCheckpoint enables primary lessor to persist lease remainingTTL to prevent indefinite auto-renewal of long lived leases.
EnableLeaseCheckpoint bool
// LeaseCheckpointInterval is the wait duration between lease checkpoints.
LeaseCheckpointInterval time.Duration
// LeaseCheckpointPersist enables persisting remainingTTL to prevent indefinite auto-renewal of long lived leases. Always enabled in v3.6. Should be used to ensure smooth upgrade from v3.5 clusters with this feature enabled.
LeaseCheckpointPersist bool
EnableGRPCGateway bool
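
For illustration, a hedged sketch of how the three checkpoint knobs above fit together. The struct is a local stand-in (the real fields live on the internal ServerConfig), and the 5-minute interval is an assumed value, not taken from this diff.

```go
package main

import (
	"fmt"
	"time"
)

// leaseCheckpointConfig mirrors the three ServerConfig fields above.
type leaseCheckpointConfig struct {
	EnableLeaseCheckpoint   bool
	LeaseCheckpointInterval time.Duration
	LeaseCheckpointPersist  bool
}

func main() {
	// With checkpointing enabled, the leader periodically tells followers the
	// remaining TTL of long-lived leases, so a leader change does not reset
	// them; Persist additionally writes remainingTTL to the backend so it
	// survives full restarts (always on in v3.6, opt-in before).
	cfg := leaseCheckpointConfig{
		EnableLeaseCheckpoint:   true,
		LeaseCheckpointInterval: 5 * time.Minute, // assumed value; tune per workload
		LeaseCheckpointPersist:  true,
	}
	fmt.Printf("%+v\n", cfg)
}
```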

View File

@ -202,17 +202,13 @@ func (s *EtcdServer) checkHashKV() error {
}
alarmed := false
mismatch := func(id types.ID) {
mismatch := func(id uint64) {
if alarmed {
return
}
alarmed = true
// It isn't clear which member's data is corrupted, so we
// intentionally set the memberID as 0. We will identify
// the corrupted members using quorum in 3.6. Please see
// discussion in https://github.com/etcd-io/etcd/pull/14828.
a := &pb.AlarmRequest{
MemberID: 0,
MemberID: id,
Action: pb.AlarmRequest_ACTIVATE,
Alarm: pb.AlarmType_CORRUPT,
}
@ -235,7 +231,7 @@ func (s *EtcdServer) checkHashKV() error {
} else {
plog.Warningf("mismatched hashes %d and %d for revision %d", h, h2, rev)
}
mismatch(s.ID())
mismatch(uint64(s.ID()))
}
checkedCount := 0
@ -244,6 +240,7 @@ func (s *EtcdServer) checkHashKV() error {
continue
}
checkedCount++
id := p.resp.Header.MemberId
// leader expects the follower's latest revision to be less than or equal to its own
if p.resp.Header.Revision > rev2 {
@ -252,16 +249,16 @@ func (s *EtcdServer) checkHashKV() error {
"revision from follower must be less than or equal to leader's",
zap.Int64("leader-revision", rev2),
zap.Int64("follower-revision", p.resp.Header.Revision),
zap.String("follower-peer-id", p.id.String()),
zap.String("follower-peer-id", types.ID(id).String()),
)
} else {
plog.Warningf(
"revision %d from member %v, expected at most %d",
p.resp.Header.Revision,
p.id,
types.ID(id),
rev2)
}
mismatch(p.id)
mismatch(id)
}
// leader expects the follower's latest compact revision to be less than or equal to its own
@ -271,17 +268,17 @@ func (s *EtcdServer) checkHashKV() error {
"compact revision from follower must be less than or equal to leader's",
zap.Int64("leader-compact-revision", crev2),
zap.Int64("follower-compact-revision", p.resp.CompactRevision),
zap.String("follower-peer-id", p.id.String()),
zap.String("follower-peer-id", types.ID(id).String()),
)
} else {
plog.Warningf(
"compact revision %d from member %v, expected at most %d",
p.resp.CompactRevision,
p.id,
types.ID(id),
crev2,
)
}
mismatch(p.id)
mismatch(id)
}
// if the follower's compact revision equals the leader's previous compact revision, the hashes at that revision must match
@ -293,18 +290,18 @@ func (s *EtcdServer) checkHashKV() error {
zap.Uint32("leader-hash", h),
zap.Int64("follower-compact-revision", p.resp.CompactRevision),
zap.Uint32("follower-hash", p.resp.Hash),
zap.String("follower-peer-id", p.id.String()),
zap.String("follower-peer-id", types.ID(id).String()),
)
} else {
plog.Warningf(
"hash %d at revision %d from member %v, expected hash %d",
p.resp.Hash,
rev,
p.id,
types.ID(id),
h,
)
}
mismatch(p.id)
mismatch(id)
}
}
if lg != nil {
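
A minimal, runnable sketch (made-up member IDs) of the fire-once closure pattern used by mismatch above: no matter how many followers report a bad hash or revision, only the first call activates the CORRUPT alarm.

```go
package main

import "fmt"

func main() {
	alarmed := false
	mismatch := func(id uint64) {
		if alarmed {
			return // one alarm already raised; later mismatches are ignored
		}
		alarmed = true
		fmt.Printf("activating CORRUPT alarm for member %x\n", id)
	}

	for _, id := range []uint64{0xdead, 0xbeef, 0xcafe} {
		mismatch(id) // only the first call has any effect
	}
}
```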

View File

@ -26,7 +26,6 @@ var (
ErrTimeout = errors.New("etcdserver: request timed out")
ErrTimeoutDueToLeaderFail = errors.New("etcdserver: request timed out, possibly due to previous leader failure")
ErrTimeoutDueToConnectionLost = errors.New("etcdserver: request timed out, possibly due to connection lost")
ErrTimeoutWaitAppliedIndex = errors.New("etcdserver: request timed out, waiting for the applied index took too long")
ErrTimeoutLeaderTransfer = errors.New("etcdserver: request timed out, leader transfer took too long")
ErrLeaderChanged = errors.New("etcdserver: leader changed")
ErrNotEnoughStartedMembers = errors.New("etcdserver: re-configuration failed due to not enough started members")

View File

@ -2,16 +2,14 @@
// source: etcdserver.proto
/*
Package etcdserverpb is a generated protocol buffer package.
It is generated from these files:
etcdserver.proto
raft_internal.proto
rpc.proto
It has these top-level messages:
Request
Metadata
RequestHeader

View File

@ -135,14 +135,8 @@ func NewBackendQuota(s *EtcdServer, name string) Quota {
}
func (b *backendQuota) Available(v interface{}) bool {
cost := b.Cost(v)
// a zero-cost (non-mutating) request is safe to pass through
if cost == 0 {
return true
}
// TODO: maybe optimize backend.Size()
return b.s.Backend().Size()+int64(cost) < b.maxBackendBytes
return b.s.Backend().Size()+int64(b.Cost(v)) < b.maxBackendBytes
}
func (b *backendQuota) Cost(v interface{}) int {
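
A worked example (made-up sizes; 2 GiB matches etcd's commonly cited default backend quota) of the Available arithmetic above, including the zero-cost pass-through for read-only requests.

```go
package main

import "fmt"

// available mirrors backendQuota.Available: zero-cost (read-only) requests
// always pass; mutating requests must fit under the byte quota.
func available(size, cost, max int64) bool {
	if cost == 0 {
		return true
	}
	return size+cost < max
}

func main() {
	const max = 2 * 1024 * 1024 * 1024 // 2 GiB
	fmt.Println(available(max-100, 512, max))  // false: the put would overflow the quota
	fmt.Println(available(max-4096, 512, max)) // true: still fits
	fmt.Println(available(max, 0, max))        // true: reads pass through
}
```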

View File

@ -215,18 +215,6 @@ func (r *raftNode) start(rh *raftReadyHandler) {
notifyc: notifyc,
}
waitWALSync := shouldWaitWALSync(rd)
if waitWALSync {
// gofail: var raftBeforeSaveWaitWalSync struct{}
if err := r.storage.Save(rd.HardState, rd.Entries); err != nil {
if r.lg != nil {
r.lg.Fatal("failed to save Raft hard state and entries", zap.Error(err))
} else {
plog.Fatalf("failed to save state and entries error: %v", err)
}
}
}
updateCommittedIndex(&ap, rh)
select {
@ -257,7 +245,6 @@ func (r *raftNode) start(rh *raftReadyHandler) {
// gofail: var raftAfterSaveSnap struct{}
}
if !waitWALSync {
// gofail: var raftBeforeSave struct{}
if err := r.storage.Save(rd.HardState, rd.Entries); err != nil {
if r.lg != nil {
@ -266,7 +253,6 @@ func (r *raftNode) start(rh *raftReadyHandler) {
plog.Fatalf("failed to save state and entries error: %v", err)
}
}
}
if !raft.IsEmptyHardState(rd.HardState) {
proposalsCommitted.Set(float64(rd.HardState.Commit))
}
@ -356,43 +342,6 @@ func (r *raftNode) start(rh *raftReadyHandler) {
}()
}
// For a cluster with only one member, raft may send both the
// unstable entries and committed entries to etcdserver, and there
// may be overlapping log entries between them.
//
// etcd responds to the client once it finishes (actually only partially
// finishes) the applying workflow. But when the client receives the
// response, it doesn't mean etcd has already successfully saved the data,
// including to BoltDB and the WAL, because:
// 1. etcd commits the boltDB transaction periodically instead of on each request;
// 2. etcd saves WAL entries in parallel with applying the committed entries.
//
// Accordingly, it might run into a situation of data loss when etcd crashes
// immediately after responding to the client and before BoltDB and the WAL
// have successfully saved the data to disk.
// Note that this issue can only happen in clusters with only one member.
//
// For clusters with multiple members, it isn't an issue, because etcd will
// not commit & apply the data before it has been replicated to a majority
// of members. When the client receives the response, the data must have
// been applied, which further means it must have been committed.
// Note: for clusters with multiple members, raft will never send identical
// unstable entries and committed entries to etcdserver.
//
// Refer to https://github.com/etcd-io/etcd/issues/14370.
func shouldWaitWALSync(rd raft.Ready) bool {
if len(rd.CommittedEntries) == 0 || len(rd.Entries) == 0 {
return false
}
// Check if there is overlap between unstable and committed entries
// assuming that their index and term are only incrementing.
lastCommittedEntry := rd.CommittedEntries[len(rd.CommittedEntries)-1]
firstUnstableEntry := rd.Entries[0]
return lastCommittedEntry.Term > firstUnstableEntry.Term ||
(lastCommittedEntry.Term == firstUnstableEntry.Term && lastCommittedEntry.Index >= firstUnstableEntry.Index)
}
func updateCommittedIndex(ap *apply, rh *raftReadyHandler) {
var ci uint64
if len(ap.entries) != 0 {

View File

@ -21,7 +21,6 @@ import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"go.etcd.io/etcd/etcdserver/api/membership"
"go.etcd.io/etcd/pkg/mock/mockstorage"
"go.etcd.io/etcd/pkg/pbutil"
@ -268,79 +267,3 @@ func TestProcessDuplicatedAppRespMessage(t *testing.T) {
t.Errorf("count = %d, want %d", got, want)
}
}
func TestShouldWaitWALSync(t *testing.T) {
testcases := []struct {
name string
unstableEntries []raftpb.Entry
committedEntries []raftpb.Entry
expectedResult bool
}{
{
name: "both entries are nil",
unstableEntries: nil,
committedEntries: nil,
expectedResult: false,
},
{
name: "both entries are empty slices",
unstableEntries: []raftpb.Entry{},
committedEntries: []raftpb.Entry{},
expectedResult: false,
},
{
name: "one nil and the other empty",
unstableEntries: nil,
committedEntries: []raftpb.Entry{},
expectedResult: false,
},
{
name: "one nil and the other has data",
unstableEntries: nil,
committedEntries: []raftpb.Entry{{Term: 4, Index: 10, Type: raftpb.EntryNormal, Data: []byte{0x11, 0x22, 0x33}}},
expectedResult: false,
},
{
name: "one empty and the other has data",
unstableEntries: []raftpb.Entry{},
committedEntries: []raftpb.Entry{{Term: 4, Index: 10, Type: raftpb.EntryNormal, Data: []byte{0x11, 0x22, 0x33}}},
expectedResult: false,
},
{
name: "has different term and index",
unstableEntries: []raftpb.Entry{{Term: 5, Index: 11, Type: raftpb.EntryNormal, Data: []byte{0x11, 0x22, 0x33}}},
committedEntries: []raftpb.Entry{{Term: 4, Index: 10, Type: raftpb.EntryNormal, Data: []byte{0x11, 0x22, 0x33}}},
expectedResult: false,
},
{
name: "has identical data",
unstableEntries: []raftpb.Entry{{Term: 4, Index: 10, Type: raftpb.EntryNormal, Data: []byte{0x11, 0x22, 0x33}}},
committedEntries: []raftpb.Entry{{Term: 4, Index: 10, Type: raftpb.EntryNormal, Data: []byte{0x11, 0x22, 0x33}}},
expectedResult: true,
},
{
name: "has overlapped entry",
unstableEntries: []raftpb.Entry{
{Term: 4, Index: 10, Type: raftpb.EntryNormal, Data: []byte{0x11, 0x22, 0x33}},
{Term: 4, Index: 11, Type: raftpb.EntryNormal, Data: []byte{0x44, 0x55, 0x66}},
{Term: 4, Index: 12, Type: raftpb.EntryNormal, Data: []byte{0x77, 0x88, 0x99}},
},
committedEntries: []raftpb.Entry{
{Term: 4, Index: 8, Type: raftpb.EntryNormal, Data: []byte{0x07, 0x08, 0x09}},
{Term: 4, Index: 9, Type: raftpb.EntryNormal, Data: []byte{0x10, 0x11, 0x12}},
{Term: 4, Index: 10, Type: raftpb.EntryNormal, Data: []byte{0x11, 0x22, 0x33}},
},
expectedResult: true,
},
}
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
shouldWALSync := shouldWaitWALSync(raft.Ready{
Entries: tc.unstableEntries,
CommittedEntries: tc.committedEntries,
})
assert.Equal(t, tc.expectedResult, shouldWALSync)
})
}
}

View File

@ -25,7 +25,6 @@ import (
"os"
"path"
"regexp"
"strings"
"sync"
"sync/atomic"
"time"
@ -259,6 +258,10 @@ type EtcdServer struct {
peerRt http.RoundTripper
reqIDGen *idutil.Generator
// forceVersionC is used to force the version monitor loop
// to detect the cluster version immediately.
forceVersionC chan struct{}
// wgMu blocks concurrent waitgroup mutation while the server is stopping
wgMu sync.RWMutex
// wg is used to wait for the goroutines that depend on the server state
@ -273,9 +276,6 @@ type EtcdServer struct {
leadTimeMu sync.RWMutex
leadElectedTime time.Time
firstCommitInTermMu sync.RWMutex
firstCommitInTermC chan struct{}
*AccessController
}
@ -323,17 +323,6 @@ func NewServer(cfg ServerConfig) (srv *EtcdServer, err error) {
plog.Fatalf("create snapshot directory error: %v", err)
}
}
if err = fileutil.RemoveMatchFile(cfg.Logger, cfg.SnapDir(), func(fileName string) bool {
return strings.HasPrefix(fileName, "tmp")
}); err != nil {
cfg.Logger.Error(
"failed to remove temp file(s) in snapshot directory",
zap.String("path", cfg.SnapDir()),
zap.Error(err),
)
}
ss := snap.New(cfg.Logger, cfg.SnapDir())
bepath := cfg.backendPath()
@ -539,8 +528,8 @@ func NewServer(cfg ServerConfig) (srv *EtcdServer, err error) {
SyncTicker: time.NewTicker(500 * time.Millisecond),
peerRt: prt,
reqIDGen: idutil.NewGenerator(uint16(id), time.Now()),
forceVersionC: make(chan struct{}),
AccessController: &AccessController{CORS: cfg.CORS, HostWhitelist: cfg.HostWhitelist},
firstCommitInTermC: make(chan struct{}),
}
serverID.With(prometheus.Labels{"server_id": id.String()}).Set(1)
@ -554,11 +543,9 @@ func NewServer(cfg ServerConfig) (srv *EtcdServer, err error) {
srv.lessor = lease.NewLessor(
srv.getLogger(),
srv.be,
srv.cluster,
lease.LessorConfig{
MinLeaseTTL: int64(math.Ceil(minTTL.Seconds())),
CheckpointInterval: cfg.LeaseCheckpointInterval,
CheckpointPersist: cfg.LeaseCheckpointPersist,
ExpiredLeasesRetryInterval: srv.Cfg.ReqTimeout(),
})
@ -1072,42 +1059,19 @@ func (s *EtcdServer) run() {
f := func(context.Context) { s.applyAll(&ep, &ap) }
sched.Schedule(f)
case leases := <-expiredLeaseC:
s.revokeExpiredLeases(leases)
case err := <-s.errorc:
if lg != nil {
lg.Warn("server error", zap.Error(err))
lg.Warn("data-dir used by this member must be removed")
} else {
plog.Errorf("%s", err)
plog.Infof("the data-dir used by this member must be removed.")
}
return
case <-getSyncC():
if s.v2store.HasTTLKeys() {
s.sync(s.Cfg.ReqTimeout())
}
case <-s.stop:
return
}
}
}
func (s *EtcdServer) revokeExpiredLeases(leases []*lease.Lease) {
s.goAttach(func() {
lg := s.Logger()
// Increases throughput of the expired-lease deletion process through parallelization
c := make(chan struct{}, maxPendingRevokes)
for _, curLease := range leases {
for _, lease := range leases {
select {
case c <- struct{}{}:
case <-s.stopping:
return
}
f := func(lid int64) {
lid := lease.ID
s.goAttach(func() {
ctx := s.authStore.WithRoot(s.ctx)
_, lerr := s.LeaseRevoke(ctx, &pb.LeaseRevokeRequest{ID: lid})
_, lerr := s.LeaseRevoke(ctx, &pb.LeaseRevokeRequest{ID: int64(lid)})
if lerr == nil {
leaseExpired.Inc()
} else {
@ -1125,10 +1089,24 @@ func (s *EtcdServer) revokeExpiredLeases(leases []*lease.Lease) {
<-c
})
}
f(int64(curLease.ID))
}
})
case err := <-s.errorc:
if lg != nil {
lg.Warn("server error", zap.Error(err))
lg.Warn("data-dir used by this member must be removed")
} else {
plog.Errorf("%s", err)
plog.Infof("the data-dir used by this member must be removed.")
}
return
case <-getSyncC():
if s.v2store.HasTTLKeys() {
s.sync(s.Cfg.ReqTimeout())
}
case <-s.stop:
return
}
}
}
func (s *EtcdServer) applyAll(ep *etcdProgress, apply *apply) {
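
A self-contained sketch (not etcd's code; the names are illustrative) of the bounded-parallelism idiom in revokeExpiredLeases above: a buffered channel acts as a counting semaphore that caps in-flight revocations at maxPendingRevokes.

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	const maxPendingRevokes = 16 // assumed cap, mirroring etcd's constant
	leases := []int64{101, 102, 103, 104, 105}

	sem := make(chan struct{}, maxPendingRevokes) // counting semaphore
	var wg sync.WaitGroup
	for _, id := range leases {
		sem <- struct{}{} // blocks once maxPendingRevokes revocations are in flight
		wg.Add(1)
		go func(id int64) {
			defer wg.Done()
			defer func() { <-sem }()          // free the slot when done
			fmt.Println("revoking lease", id) // stand-in for LeaseRevoke
		}(id)
	}
	wg.Wait()
}
```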
@ -1798,10 +1776,6 @@ func (s *EtcdServer) mayPromoteMember(id types.ID) error {
// Note: it will return nil if the member is not found in the cluster or is not a learner.
// These two conditions are checked again before the apply phase.
func (s *EtcdServer) isLearnerReady(id uint64) error {
if err := s.waitAppliedIndex(); err != nil {
return err
}
rs := s.raftStatus()
// leader's raftStatus.Progress is not nil
@ -1821,17 +1795,13 @@ func (s *EtcdServer) isLearnerReady(id uint64) error {
}
}
// We should return an error directly from the API, to avoid the request
// being unnecessarily delivered to raft.
if !isFound {
return membership.ErrIDNotFound
}
if isFound {
leaderMatch := rs.Progress[leaderID].Match
// the learner's Match has not caught up with the leader yet
if float64(learnerMatch) < float64(leaderMatch)*readyPercent {
return ErrLearnerNotReady
}
}
return nil
}
@ -1942,16 +1912,6 @@ func (s *EtcdServer) leaderChangedNotify() <-chan struct{} {
return s.leaderChanged
}
// FirstCommitInTermNotify returns a channel that is closed on the first
// entry committed in a new term, which a new leader needs before it can
// answer read-only requests (the leader cannot serve any read-only request
// until then, as long as linearizable semantics are required).
func (s *EtcdServer) FirstCommitInTermNotify() <-chan struct{} {
s.firstCommitInTermMu.RLock()
defer s.firstCommitInTermMu.RUnlock()
return s.firstCommitInTermC
}
// RaftStatusGetter represents etcd server and Raft progress.
type RaftStatusGetter interface {
ID() types.ID
@ -2219,8 +2179,10 @@ func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) {
// The raft state machine may generate a noop entry on leader confirmation.
// Skip it early to avoid potential bugs down the road.
if len(e.Data) == 0 {
s.notifyAboutFirstCommitInTerm()
select {
case s.forceVersionC <- struct{}{}:
default:
}
// Promote the lessor when the local member is the leader and has finished
// applying all entries from the last term.
if s.isLeader() {
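
The select with an empty default above is a classic non-blocking send. A runnable sketch (illustrative only) of why the apply loop can never stall on the version monitor:

```go
package main

import "fmt"

func main() {
	forceVersionC := make(chan struct{}) // unbuffered, like the field above

	// No goroutine is receiving, so the send cannot proceed; the default
	// branch runs immediately and the signal is simply dropped.
	select {
	case forceVersionC <- struct{}{}:
		fmt.Println("monitor notified")
	default:
		fmt.Println("monitor busy; signal dropped, next tick will catch up")
	}
}
```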
@ -2293,15 +2255,6 @@ func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) {
})
}
func (s *EtcdServer) notifyAboutFirstCommitInTerm() {
newNotifier := make(chan struct{})
s.firstCommitInTermMu.Lock()
notifierToClose := s.firstCommitInTermC
s.firstCommitInTermC = newNotifier
s.firstCommitInTermMu.Unlock()
close(notifierToClose)
}
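
A runnable sketch (simplified, not etcd's types) of the notifier idiom above: waiters hold the current channel, and the producer swaps in a fresh one before closing the old, so every waiter wakes exactly once per event.

```go
package main

import (
	"fmt"
	"sync"
)

type notifier struct {
	mu sync.RWMutex
	ch chan struct{}
}

func (n *notifier) wait() <-chan struct{} {
	n.mu.RLock()
	defer n.mu.RUnlock()
	return n.ch // waiters block on the channel current at call time
}

func (n *notifier) notify() {
	n.mu.Lock()
	old := n.ch
	n.ch = make(chan struct{}) // future waiters get a fresh channel
	n.mu.Unlock()
	close(old) // closing broadcasts to every current waiter at once
}

func main() {
	n := &notifier{ch: make(chan struct{})}
	done := make(chan struct{})
	go func() {
		<-n.wait()
		fmt.Println("observed first commit in term")
		close(done)
	}()
	n.notify()
	<-done
}
```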
// applyConfChange applies a ConfChange to the server. It is only
// invoked with a ConfChange that has already passed through Raft
func (s *EtcdServer) applyConfChange(cc raftpb.ConfChange, confState *raftpb.ConfState) (bool, error) {
@ -2528,7 +2481,7 @@ func (s *EtcdServer) ClusterVersion() *semver.Version {
func (s *EtcdServer) monitorVersions() {
for {
select {
case <-s.FirstCommitInTermNotify():
case <-s.forceVersionC:
case <-time.After(monitorVersionInterval):
case <-s.stopping:
return

Some files were not shown because too many files have changed in this diff.