Compare commits

..

1 Commits

Author SHA1 Message Date
Vitaliy Filippov 416381529b etcdserver: Fix 64 KB websocket notification message limit
This fixes etcd being unable to send any message longer than 64 KB as
a notification over the websocket. This was because the older version
of grpc-websocket-proxy was used and WithMaxRespBodyBufferSize option
wasn't set.
2020-10-19 15:20:44 +03:00
1339 changed files with 468613 additions and 8725 deletions

View File

@ -1,32 +0,0 @@
name: Release
on: [push, pull_request]
jobs:
release:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- id: goversion
run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT"
- uses: actions/setup-go@v2
with:
go-version: ${{ steps.goversion.outputs.goversion }}
- name: release
run: |
set -euo pipefail
git config --global user.email "github-action@etcd.io"
git config --global user.name "Github Action"
gpg --batch --gen-key <<EOF
%no-protection
Key-Type: 1
Key-Length: 2048
Subkey-Type: 1
Subkey-Length: 2048
Name-Real: Github Action
Name-Email: github-action@etcd.io
Expire-Date: 0
EOF
DRY_RUN=true ./scripts/release.sh --no-upload --no-docker-push --in-place 3.4.99
- name: test-image
run: |
VERSION=3.4.99 ./scripts/test_images.sh

View File

@ -1,78 +0,0 @@
name: Tests
on: [push, pull_request]
jobs:
test:
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
target:
- linux-amd64-fmt
- linux-amd64-integration-1-cpu
- linux-amd64-integration-2-cpu
- linux-amd64-integration-4-cpu
- linux-amd64-functional
- linux-amd64-unit-4-cpu-race
- all-build
- linux-amd64-grpcproxy
- linux-amd64-e2e
- linux-386-unit
steps:
- uses: actions/checkout@v2
- id: goversion
run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT"
- uses: actions/setup-go@v2
with:
go-version: ${{ steps.goversion.outputs.goversion }}
- run: date
- env:
TARGET: ${{ matrix.target }}
run: |
set -euo pipefail
go version
echo ${GOROOT}
echo "${TARGET}"
case "${TARGET}" in
linux-amd64-fmt)
GOARCH=amd64 PASSES='fmt bom dep' ./test
;;
linux-amd64-integration-1-cpu)
GOARCH=amd64 CPU=1 PASSES='integration' RACE='false' ./test
;;
linux-amd64-integration-2-cpu)
GOARCH=amd64 CPU=2 PASSES='integration' RACE='false' ./test
;;
linux-amd64-integration-4-cpu)
GOARCH=amd64 CPU=4 PASSES='integration' RACE='false' ./test
;;
linux-amd64-functional)
./build && GOARCH=amd64 PASSES='functional' ./test
;;
linux-amd64-unit-4-cpu-race)
GOARCH=amd64 PASSES='unit' RACE='true' CPU='4' ./test -p=2
;;
all-build)
GOARCH=amd64 PASSES='build' ./test
GOARCH=386 PASSES='build' ./test
GO_BUILD_FLAGS='-v' GOOS=darwin GOARCH=amd64 ./build
GO_BUILD_FLAGS='-v' GOOS=windows GOARCH=amd64 ./build
GO_BUILD_FLAGS='-v' GOARCH=arm ./build
GO_BUILD_FLAGS='-v' GOARCH=arm64 ./build
GO_BUILD_FLAGS='-v' GOARCH=ppc64le ./build
GO_BUILD_FLAGS='-v' GOARCH=s390x ./build
;;
linux-amd64-grpcproxy)
PASSES='build grpcproxy' CPU='4' RACE='true' ./test
;;
linux-amd64-e2e)
GOARCH=amd64 PASSES='build release e2e' ./test
;;
linux-386-unit)
GOARCH=386 PASSES='unit' ./test
;;
*)
echo "Failed to find target"
exit 1
;;
esac

View File

@ -1,37 +0,0 @@
name: Trivy Nightly Scan
on:
schedule:
- cron: '0 2 * * *' # run at 2 AM UTC
permissions: read-all
jobs:
nightly-scan:
name: Trivy Scan nightly
strategy:
fail-fast: false
matrix:
# maintain the versions of etcd that need to be actively
# security scanned
versions: [v3.4.22]
permissions:
security-events: write # for github/codeql-action/upload-sarif to upload SARIF results
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # v3.1.0
with:
ref: release-3.4
- name: Run Trivy vulnerability scanner
uses: aquasecurity/trivy-action@9ab158e8597f3b310480b9a69402b419bc03dbd5 # master
with:
image-ref: 'gcr.io/etcd-development/etcd:${{ matrix.versions }}'
severity: 'CRITICAL,HIGH'
format: 'template'
template: '@/contrib/sarif.tpl'
output: 'trivy-results-3-4.sarif'
- name: Upload Trivy scan results to GitHub Security tab
uses: github/codeql-action/upload-sarif@a669cc5936cc5e1b6a362ec1ff9e410dc570d190 # v2.1.36
with:
sarif_file: 'trivy-results-3-4.sarif'

1
.gitignore vendored
View File

@ -31,7 +31,6 @@ vendor/**/*
!vendor/**/License* !vendor/**/License*
!vendor/**/LICENCE* !vendor/**/LICENCE*
!vendor/**/LICENSE* !vendor/**/LICENSE*
!vendor/modules.txt
vendor/**/*_test.go vendor/**/*_test.go
*.bak *.bak

View File

@ -1 +0,0 @@
1.19.10

94
.travis.yml Normal file
View File

@ -0,0 +1,94 @@
language: go
go_import_path: go.etcd.io/etcd
sudo: required
services: docker
go:
- 1.12.12
notifications:
on_success: never
on_failure: never
env:
matrix:
- TARGET=linux-amd64-fmt
- TARGET=linux-amd64-integration-1-cpu
- TARGET=linux-amd64-integration-2-cpu
- TARGET=linux-amd64-integration-4-cpu
- TARGET=linux-amd64-functional
- TARGET=linux-amd64-unit
- TARGET=all-build
- TARGET=linux-amd64-grpcproxy
- TARGET=linux-386-unit
matrix:
fast_finish: true
allow_failures:
- go: 1.12.12
env: TARGET=linux-amd64-grpcproxy
- go: 1.12.12
env: TARGET=linux-386-unit
before_install:
- if [[ $TRAVIS_GO_VERSION == 1.* ]]; then docker pull gcr.io/etcd-development/etcd-test:go${TRAVIS_GO_VERSION}; fi
install:
- go get -t -v -d ./...
script:
- echo "TRAVIS_GO_VERSION=${TRAVIS_GO_VERSION}"
- >
case "${TARGET}" in
linux-amd64-fmt)
docker run --rm \
--volume=`pwd`:/go/src/go.etcd.io/etcd gcr.io/etcd-development/etcd-test:go${TRAVIS_GO_VERSION} \
/bin/bash -c "GOARCH=amd64 PASSES='fmt bom dep' ./test"
;;
linux-amd64-integration-1-cpu)
docker run --rm \
--volume=`pwd`:/go/src/go.etcd.io/etcd gcr.io/etcd-development/etcd-test:go${TRAVIS_GO_VERSION} \
/bin/bash -c "GOARCH=amd64 CPU=1 PASSES='integration' ./test"
;;
linux-amd64-integration-2-cpu)
docker run --rm \
--volume=`pwd`:/go/src/go.etcd.io/etcd gcr.io/etcd-development/etcd-test:go${TRAVIS_GO_VERSION} \
/bin/bash -c "GOARCH=amd64 CPU=2 PASSES='integration' ./test"
;;
linux-amd64-integration-4-cpu)
docker run --rm \
--volume=`pwd`:/go/src/go.etcd.io/etcd gcr.io/etcd-development/etcd-test:go${TRAVIS_GO_VERSION} \
/bin/bash -c "GOARCH=amd64 CPU=4 PASSES='integration' ./test"
;;
linux-amd64-functional)
docker run --rm \
--volume=`pwd`:/go/src/go.etcd.io/etcd gcr.io/etcd-development/etcd-test:go${TRAVIS_GO_VERSION} \
/bin/bash -c "./build && GOARCH=amd64 PASSES='functional' ./test"
;;
linux-amd64-unit)
docker run --rm \
--volume=`pwd`:/go/src/go.etcd.io/etcd gcr.io/etcd-development/etcd-test:go${TRAVIS_GO_VERSION} \
/bin/bash -c "GOARCH=amd64 PASSES='unit' ./test"
;;
all-build)
docker run --rm \
--volume=`pwd`:/go/src/go.etcd.io/etcd gcr.io/etcd-development/etcd-test:go${TRAVIS_GO_VERSION} \
/bin/bash -c "GOARCH=amd64 PASSES='build' ./test \
&& GOARCH=386 PASSES='build' ./test \
&& GO_BUILD_FLAGS='-v' GOOS=darwin GOARCH=amd64 ./build \
&& GO_BUILD_FLAGS='-v' GOOS=windows GOARCH=amd64 ./build \
&& GO_BUILD_FLAGS='-v' GOARCH=arm ./build \
&& GO_BUILD_FLAGS='-v' GOARCH=arm64 ./build \
&& GO_BUILD_FLAGS='-v' GOARCH=ppc64le ./build"
;;
linux-amd64-grpcproxy)
sudo HOST_TMP_DIR=/tmp TEST_OPTS="PASSES='build grpcproxy'" make docker-test
;;
linux-386-unit)
docker run --rm \
--volume=`pwd`:/go/src/go.etcd.io/etcd gcr.io/etcd-development/etcd-test:go${TRAVIS_GO_VERSION} \
/bin/bash -c "GOARCH=386 PASSES='unit' ./test"
;;
esac

View File

@ -1,10 +1,15 @@
FROM --platform=linux/amd64 gcr.io/distroless/static-debian11 FROM k8s.gcr.io/debian-base:v1.0.0
ADD etcd /usr/local/bin/ ADD etcd /usr/local/bin/
ADD etcdctl /usr/local/bin/ ADD etcdctl /usr/local/bin/
RUN mkdir -p /var/etcd/
RUN mkdir -p /var/lib/etcd/
WORKDIR /var/etcd/ # Alpine Linux doesn't use pam, which means that there is no /etc/nsswitch.conf,
WORKDIR /var/lib/etcd/ # but Golang relies on /etc/nsswitch.conf to check the order of DNS resolving
# (see https://github.com/golang/go/commit/9dee7771f561cf6aee081c0af6658cc81fac3918)
# To fix this we just create /etc/nsswitch.conf and add the following line:
RUN echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf
EXPOSE 2379 2380 EXPOSE 2379 2380

View File

@ -1,10 +1,9 @@
FROM --platform=linux/arm64 gcr.io/distroless/static-debian11 FROM k8s.gcr.io/debian-base-arm64:v1.0.0
ADD etcd /usr/local/bin/ ADD etcd /usr/local/bin/
ADD etcdctl /usr/local/bin/ ADD etcdctl /usr/local/bin/
ADD var/etcd /var/etcd
WORKDIR /var/etcd/ ADD var/lib/etcd /var/lib/etcd
WORKDIR /var/lib/etcd/
EXPOSE 2379 2380 EXPOSE 2379 2380

View File

@ -1,10 +1,9 @@
FROM --platform=linux/ppc64le gcr.io/distroless/static-debian11 FROM k8s.gcr.io/debian-base-ppc64le:v1.0.0
ADD etcd /usr/local/bin/ ADD etcd /usr/local/bin/
ADD etcdctl /usr/local/bin/ ADD etcdctl /usr/local/bin/
ADD var/etcd /var/etcd
WORKDIR /var/etcd/ ADD var/lib/etcd /var/lib/etcd
WORKDIR /var/lib/etcd/
EXPOSE 2379 2380 EXPOSE 2379 2380

View File

@ -128,7 +128,7 @@ for TARGET_ARCH in "amd64" "arm64" "ppc64le"; do
TAG=quay.io/coreos/etcd GOARCH=${TARGET_ARCH} \ TAG=quay.io/coreos/etcd GOARCH=${TARGET_ARCH} \
BINARYDIR=release/etcd-${VERSION}-linux-${TARGET_ARCH} \ BINARYDIR=release/etcd-${VERSION}-linux-${TARGET_ARCH} \
BUILDDIR=release \ BUILDDIR=release \
./scripts/build-docker.sh ${VERSION} ./scripts/build-docker ${VERSION}
done done
``` ```

View File

@ -51,7 +51,7 @@ docker-remove:
GO_VERSION ?= 1.19.9 GO_VERSION ?= 1.12.12
ETCD_VERSION ?= $(shell git rev-parse --short HEAD || echo "GitNotFound") ETCD_VERSION ?= $(shell git rev-parse --short HEAD || echo "GitNotFound")
TEST_SUFFIX = $(shell date +%s | base64 | head -c 15) TEST_SUFFIX = $(shell date +%s | base64 | head -c 15)
@ -65,11 +65,11 @@ endif
# Example: # Example:
# GO_VERSION=1.12.17 make build-docker-test # GO_VERSION=1.12.12 make build-docker-test
# make build-docker-test # make build-docker-test
# #
# gcloud docker -- login -u _json_key -p "$(cat /etc/gcp-key-etcd-development.json)" https://gcr.io # gcloud docker -- login -u _json_key -p "$(cat /etc/gcp-key-etcd-development.json)" https://gcr.io
# GO_VERSION=1.12.17 make push-docker-test # GO_VERSION=1.12.12 make push-docker-test
# make push-docker-test # make push-docker-test
# #
# gsutil -m acl ch -u allUsers:R -r gs://artifacts.etcd-development.appspot.com # gsutil -m acl ch -u allUsers:R -r gs://artifacts.etcd-development.appspot.com

View File

@ -5,11 +5,9 @@
Package authpb is a generated protocol buffer package. Package authpb is a generated protocol buffer package.
It is generated from these files: It is generated from these files:
auth.proto auth.proto
It has these top-level messages: It has these top-level messages:
UserAddOptions UserAddOptions
User User
Permission Permission

View File

@ -21,7 +21,7 @@ import (
"errors" "errors"
"time" "time"
"github.com/golang-jwt/jwt" jwt "github.com/dgrijalva/jwt-go"
"go.uber.org/zap" "go.uber.org/zap"
) )
@ -42,7 +42,7 @@ func (t *tokenJWT) info(ctx context.Context, token string, rev uint64) (*AuthInf
// rev isn't used in JWT, it is only used in simple token // rev isn't used in JWT, it is only used in simple token
var ( var (
username string username string
revision float64 revision uint64
) )
parsed, err := jwt.Parse(token, func(token *jwt.Token) (interface{}, error) { parsed, err := jwt.Parse(token, func(token *jwt.Token) (interface{}, error) {
@ -82,19 +82,10 @@ func (t *tokenJWT) info(ctx context.Context, token string, rev uint64) (*AuthInf
return nil, false return nil, false
} }
username, ok = claims["username"].(string) username = claims["username"].(string)
if !ok { revision = uint64(claims["revision"].(float64))
t.lg.Warn("failed to obtain user claims from jwt token")
return nil, false
}
revision, ok = claims["revision"].(float64) return &AuthInfo{Username: username, Revision: revision}, true
if !ok {
t.lg.Warn("failed to obtain revision claims from jwt token")
return nil, false
}
return &AuthInfo{Username: username, Revision: uint64(revision)}, true
} }
func (t *tokenJWT) assign(ctx context.Context, username string, revision uint64) (string, error) { func (t *tokenJWT) assign(ctx context.Context, username string, revision uint64) (string, error) {

View File

@ -18,10 +18,7 @@ import (
"context" "context"
"fmt" "fmt"
"testing" "testing"
"time"
"github.com/golang-jwt/jwt"
"github.com/stretchr/testify/require"
"go.uber.org/zap" "go.uber.org/zap"
) )
@ -205,75 +202,3 @@ func TestJWTBad(t *testing.T) {
func testJWTOpts() string { func testJWTOpts() string {
return fmt.Sprintf("%s,pub-key=%s,priv-key=%s,sign-method=RS256", tokenTypeJWT, jwtRSAPubKey, jwtRSAPrivKey) return fmt.Sprintf("%s,pub-key=%s,priv-key=%s,sign-method=RS256", tokenTypeJWT, jwtRSAPubKey, jwtRSAPrivKey)
} }
func TestJWTTokenWithMissingFields(t *testing.T) {
testCases := []struct {
name string
username string // An empty string means not present
revision uint64 // 0 means not present
expectValid bool
}{
{
name: "valid token",
username: "hello",
revision: 100,
expectValid: true,
},
{
name: "no username",
username: "",
revision: 100,
expectValid: false,
},
{
name: "no revision",
username: "hello",
revision: 0,
expectValid: false,
},
}
for _, tc := range testCases {
tc := tc
optsMap := map[string]string{
"priv-key": jwtRSAPrivKey,
"sign-method": "RS256",
"ttl": "1h",
}
t.Run(tc.name, func(t *testing.T) {
// prepare claims
claims := jwt.MapClaims{
"exp": time.Now().Add(time.Hour).Unix(),
}
if tc.username != "" {
claims["username"] = tc.username
}
if tc.revision != 0 {
claims["revision"] = tc.revision
}
// generate a JWT token with the given claims
var opts jwtOptions
err := opts.ParseWithDefaults(optsMap)
require.NoError(t, err)
key, err := opts.Key()
require.NoError(t, err)
tk := jwt.NewWithClaims(opts.SignMethod, claims)
token, err := tk.SignedString(key)
require.NoError(t, err)
// verify the token
jwtProvider, err := newTokenProviderJWT(zap.NewNop(), optsMap)
require.NoError(t, err)
ai, ok := jwtProvider.info(context.TODO(), token, 123)
require.Equal(t, tc.expectValid, ok)
if ok {
require.Equal(t, tc.username, ai.Username)
require.Equal(t, tc.revision, ai.Revision)
}
})
}
}

View File

@ -21,7 +21,7 @@ import (
"io/ioutil" "io/ioutil"
"time" "time"
"github.com/golang-jwt/jwt" jwt "github.com/dgrijalva/jwt-go"
) )
const ( const (

View File

@ -76,10 +76,8 @@ func checkKeyInterval(
cachedPerms *unifiedRangePermissions, cachedPerms *unifiedRangePermissions,
key, rangeEnd []byte, key, rangeEnd []byte,
permtyp authpb.Permission_Type) bool { permtyp authpb.Permission_Type) bool {
if isOpenEnded(rangeEnd) { if len(rangeEnd) == 1 && rangeEnd[0] == 0 {
rangeEnd = nil rangeEnd = nil
// nil rangeEnd will be converted to []byte{}, the largest element of BytesAffineComparable,
// in NewBytesAffineInterval().
} }
ivl := adt.NewBytesAffineInterval(key, rangeEnd) ivl := adt.NewBytesAffineInterval(key, rangeEnd)
@ -115,99 +113,41 @@ func checkKeyPoint(lg *zap.Logger, cachedPerms *unifiedRangePermissions, key []b
return false return false
} }
func (as *authStore) isRangeOpPermitted(userName string, key, rangeEnd []byte, permtyp authpb.Permission_Type) bool { func (as *authStore) isRangeOpPermitted(tx backend.BatchTx, userName string, key, rangeEnd []byte, permtyp authpb.Permission_Type) bool {
as.rangePermCacheMu.RLock() // assumption: tx is Lock()ed
defer as.rangePermCacheMu.RUnlock() _, ok := as.rangePermCache[userName]
rangePerm, ok := as.rangePermCache[userName]
if !ok { if !ok {
as.lg.Error(
"user doesn't exist",
zap.String("user-name", userName),
)
return false
}
if len(rangeEnd) == 0 {
return checkKeyPoint(as.lg, rangePerm, key, permtyp)
}
return checkKeyInterval(as.lg, rangePerm, key, rangeEnd, permtyp)
}
func (as *authStore) refreshRangePermCache(tx backend.BatchTx) {
// Note that every authentication configuration update calls this method and it invalidates the entire
// rangePermCache and reconstruct it based on information of users and roles stored in the backend.
// This can be a costly operation.
as.rangePermCacheMu.Lock()
defer as.rangePermCacheMu.Unlock()
as.rangePermCache = make(map[string]*unifiedRangePermissions)
users := getAllUsers(as.lg, tx)
for _, user := range users {
userName := string(user.Name)
perms := getMergedPerms(as.lg, tx, userName) perms := getMergedPerms(as.lg, tx, userName)
if perms == nil { if perms == nil {
as.lg.Error( if as.lg != nil {
as.lg.Warn(
"failed to create a merged permission", "failed to create a merged permission",
zap.String("user-name", userName), zap.String("user-name", userName),
) )
continue } else {
plog.Errorf("failed to create a unified permission of user %s", userName)
}
return false
} }
as.rangePermCache[userName] = perms as.rangePermCache[userName] = perms
} }
if len(rangeEnd) == 0 {
return checkKeyPoint(as.lg, as.rangePermCache[userName], key, permtyp)
}
return checkKeyInterval(as.lg, as.rangePermCache[userName], key, rangeEnd, permtyp)
}
func (as *authStore) clearCachedPerm() {
as.rangePermCache = make(map[string]*unifiedRangePermissions)
}
func (as *authStore) invalidateCachedPerm(userName string) {
delete(as.rangePermCache, userName)
} }
type unifiedRangePermissions struct { type unifiedRangePermissions struct {
readPerms adt.IntervalTree readPerms adt.IntervalTree
writePerms adt.IntervalTree writePerms adt.IntervalTree
} }
// Constraints related to key range
// Assumptions:
// a1. key must be non-nil
// a2. []byte{} (in the case of string, "") is not a valid key of etcd
// For representing an open-ended range, BytesAffineComparable uses []byte{} as the largest element.
// a3. []byte{0x00} is the minimum valid etcd key
//
// Based on the above assumptions, key and rangeEnd must follow below rules:
// b1. for representing a single key point, rangeEnd should be nil or zero length byte array (in the case of string, "")
// Rule a2 guarantees that (X, []byte{}) for any X is not a valid range. So such ranges can be used for representing
// a single key permission.
//
// b2. key range with upper limit, like (X, Y), larger or equal to X and smaller than Y
//
// b3. key range with open-ended, like (X, <open ended>), is represented like (X, []byte{0x00})
// Because of rule a3, if we have (X, []byte{0x00}), such a range represents an empty range and makes no sense to have
// such a permission. So we use []byte{0x00} for representing an open-ended permission.
// Note that rangeEnd with []byte{0x00} will be converted into []byte{} before inserted into the interval tree
// (rule a2 ensures that this is the largest element).
// Special range like key = []byte{0x00} and rangeEnd = []byte{0x00} is treated as a range which matches with all keys.
//
// Treating a range whose rangeEnd with []byte{0x00} as an open-ended comes from the rules of Range() and Watch() API.
func isOpenEnded(rangeEnd []byte) bool { // check rule b3
return len(rangeEnd) == 1 && rangeEnd[0] == 0
}
func isValidPermissionRange(key, rangeEnd []byte) bool {
if len(key) == 0 {
return false
}
if rangeEnd == nil || len(rangeEnd) == 0 { // ensure rule b1
return true
}
begin := adt.BytesAffineComparable(key)
end := adt.BytesAffineComparable(rangeEnd)
if begin.Compare(end) == -1 { // rule b2
return true
}
if isOpenEnded(rangeEnd) {
return true
}
return false
}

View File

@ -45,26 +45,6 @@ func TestRangePermission(t *testing.T) {
[]byte("a"), []byte("f"), []byte("a"), []byte("f"),
true, true,
}, },
{
[]adt.Interval{adt.NewBytesAffineInterval([]byte("a"), []byte("d")), adt.NewBytesAffineInterval([]byte("a"), []byte("b")), adt.NewBytesAffineInterval([]byte("c"), []byte("f"))},
[]byte("a"), []byte{},
false,
},
{
[]adt.Interval{adt.NewBytesAffineInterval([]byte("a"), []byte{})},
[]byte("a"), []byte{},
true,
},
{
[]adt.Interval{adt.NewBytesAffineInterval([]byte{0x00}, []byte{})},
[]byte("a"), []byte{},
true,
},
{
[]adt.Interval{adt.NewBytesAffineInterval([]byte{0x00}, []byte{})},
[]byte{0x00}, []byte{},
true,
},
} }
for i, tt := range tests { for i, tt := range tests {
@ -106,16 +86,6 @@ func TestKeyPermission(t *testing.T) {
[]byte("f"), []byte("f"),
false, false,
}, },
{
[]adt.Interval{adt.NewBytesAffineInterval([]byte("a"), []byte("d")), adt.NewBytesAffineInterval([]byte("a"), []byte("b")), adt.NewBytesAffineInterval([]byte("c"), []byte{})},
[]byte("f"),
true,
},
{
[]adt.Interval{adt.NewBytesAffineInterval([]byte("a"), []byte("d")), adt.NewBytesAffineInterval([]byte("a"), []byte("b")), adt.NewBytesAffineInterval([]byte{0x00}, []byte{})},
[]byte("f"),
true,
},
} }
for i, tt := range tests { for i, tt := range tests {
@ -130,88 +100,3 @@ func TestKeyPermission(t *testing.T) {
} }
} }
} }
func TestRangeCheck(t *testing.T) {
tests := []struct {
name string
key []byte
rangeEnd []byte
want bool
}{
{
name: "valid single key",
key: []byte("a"),
rangeEnd: []byte(""),
want: true,
},
{
name: "valid single key",
key: []byte("a"),
rangeEnd: nil,
want: true,
},
{
name: "valid key range, key < rangeEnd",
key: []byte("a"),
rangeEnd: []byte("b"),
want: true,
},
{
name: "invalid empty key range, key == rangeEnd",
key: []byte("a"),
rangeEnd: []byte("a"),
want: false,
},
{
name: "invalid empty key range, key > rangeEnd",
key: []byte("b"),
rangeEnd: []byte("a"),
want: false,
},
{
name: "invalid key, key must not be \"\"",
key: []byte(""),
rangeEnd: []byte("a"),
want: false,
},
{
name: "invalid key range, key must not be \"\"",
key: []byte(""),
rangeEnd: []byte(""),
want: false,
},
{
name: "invalid key range, key must not be \"\"",
key: []byte(""),
rangeEnd: []byte("\x00"),
want: false,
},
{
name: "valid single key (not useful in practice)",
key: []byte("\x00"),
rangeEnd: []byte(""),
want: true,
},
{
name: "valid key range, larger or equals to \"a\"",
key: []byte("a"),
rangeEnd: []byte("\x00"),
want: true,
},
{
name: "valid key range, which includes all keys",
key: []byte("\x00"),
rangeEnd: []byte("\x00"),
want: true,
},
}
for i, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := isValidPermissionRange(tt.key, tt.rangeEnd)
if result != tt.want {
t.Errorf("#%d: result=%t, want=%t", i, result, tt.want)
}
})
}
}

View File

@ -159,11 +159,6 @@ func (t *tokenSimple) invalidateUser(username string) {
} }
func (t *tokenSimple) enable() { func (t *tokenSimple) enable() {
t.simpleTokensMu.Lock()
defer t.simpleTokensMu.Unlock()
if t.simpleTokenKeeper != nil { // already enabled
return
}
if t.simpleTokenTTL <= 0 { if t.simpleTokenTTL <= 0 {
t.simpleTokenTTL = simpleTokenTTLDefault t.simpleTokenTTL = simpleTokenTTLDefault
} }

View File

@ -59,7 +59,6 @@ var (
ErrRoleAlreadyExist = errors.New("auth: role already exists") ErrRoleAlreadyExist = errors.New("auth: role already exists")
ErrRoleNotFound = errors.New("auth: role not found") ErrRoleNotFound = errors.New("auth: role not found")
ErrRoleEmpty = errors.New("auth: role name is empty") ErrRoleEmpty = errors.New("auth: role name is empty")
ErrPermissionNotGiven = errors.New("auth: permission not given")
ErrAuthFailed = errors.New("auth: authentication failed, invalid user ID or password") ErrAuthFailed = errors.New("auth: authentication failed, invalid user ID or password")
ErrNoPasswordUser = errors.New("auth: authentication failed, password was given for no password user") ErrNoPasswordUser = errors.New("auth: authentication failed, password was given for no password user")
ErrPermissionDenied = errors.New("auth: permission denied") ErrPermissionDenied = errors.New("auth: permission denied")
@ -216,14 +215,7 @@ type authStore struct {
enabled bool enabled bool
enabledMu sync.RWMutex enabledMu sync.RWMutex
// rangePermCache needs to be protected by rangePermCacheMu
// rangePermCacheMu needs to be write locked only in initialization phase or configuration changes
// Hot paths like Range(), needs to acquire read lock for improving performance
//
// Note that BatchTx and ReadTx cannot be a mutex for rangePermCache because they are independent resources
// see also: https://github.com/etcd-io/etcd/pull/13920#discussion_r849114855
rangePermCache map[string]*unifiedRangePermissions // username -> unifiedRangePermissions rangePermCache map[string]*unifiedRangePermissions // username -> unifiedRangePermissions
rangePermCacheMu sync.RWMutex
tokenProvider TokenProvider tokenProvider TokenProvider
syncConsistentIndex saveConsistentIndexFunc syncConsistentIndex saveConsistentIndexFunc
@ -266,7 +258,7 @@ func (as *authStore) AuthEnable() error {
as.enabled = true as.enabled = true
as.tokenProvider.enable() as.tokenProvider.enable()
as.refreshRangePermCache(tx) as.rangePermCache = make(map[string]*unifiedRangePermissions)
as.setRevision(getRevision(tx)) as.setRevision(getRevision(tx))
@ -403,15 +395,11 @@ func (as *authStore) Recover(be backend.Backend) {
} }
as.setRevision(getRevision(tx)) as.setRevision(getRevision(tx))
as.refreshRangePermCache(tx)
tx.Unlock() tx.Unlock()
as.enabledMu.Lock() as.enabledMu.Lock()
as.enabled = enabled as.enabled = enabled
if enabled {
as.tokenProvider.enable()
}
as.enabledMu.Unlock() as.enabledMu.Unlock()
} }
@ -466,7 +454,6 @@ func (as *authStore) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse,
as.commitRevision(tx) as.commitRevision(tx)
as.saveConsistentIndex(tx) as.saveConsistentIndex(tx)
as.refreshRangePermCache(tx)
if as.lg != nil { if as.lg != nil {
as.lg.Info("added a user", zap.String("user-name", r.Name)) as.lg.Info("added a user", zap.String("user-name", r.Name))
@ -499,8 +486,8 @@ func (as *authStore) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDelete
as.commitRevision(tx) as.commitRevision(tx)
as.saveConsistentIndex(tx) as.saveConsistentIndex(tx)
as.refreshRangePermCache(tx)
as.invalidateCachedPerm(r.Name)
as.tokenProvider.invalidateUser(r.Name) as.tokenProvider.invalidateUser(r.Name)
if as.lg != nil { if as.lg != nil {
@ -552,8 +539,8 @@ func (as *authStore) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*p
as.commitRevision(tx) as.commitRevision(tx)
as.saveConsistentIndex(tx) as.saveConsistentIndex(tx)
as.refreshRangePermCache(tx)
as.invalidateCachedPerm(r.Name)
as.tokenProvider.invalidateUser(r.Name) as.tokenProvider.invalidateUser(r.Name)
if as.lg != nil { if as.lg != nil {
@ -605,9 +592,10 @@ func (as *authStore) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUser
putUser(as.lg, tx, user) putUser(as.lg, tx, user)
as.invalidateCachedPerm(r.User)
as.commitRevision(tx) as.commitRevision(tx)
as.saveConsistentIndex(tx) as.saveConsistentIndex(tx)
as.refreshRangePermCache(tx)
if as.lg != nil { if as.lg != nil {
as.lg.Info( as.lg.Info(
@ -691,9 +679,10 @@ func (as *authStore) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUs
putUser(as.lg, tx, updatedUser) putUser(as.lg, tx, updatedUser)
as.invalidateCachedPerm(r.Name)
as.commitRevision(tx) as.commitRevision(tx)
as.saveConsistentIndex(tx) as.saveConsistentIndex(tx)
as.refreshRangePermCache(tx)
if as.lg != nil { if as.lg != nil {
as.lg.Info( as.lg.Info(
@ -763,9 +752,12 @@ func (as *authStore) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest)
putRole(as.lg, tx, updatedRole) putRole(as.lg, tx, updatedRole)
// TODO(mitake): currently single role update invalidates every cache
// It should be optimized.
as.clearCachedPerm()
as.commitRevision(tx) as.commitRevision(tx)
as.saveConsistentIndex(tx) as.saveConsistentIndex(tx)
as.refreshRangePermCache(tx)
if as.lg != nil { if as.lg != nil {
as.lg.Info( as.lg.Info(
@ -821,11 +813,11 @@ func (as *authStore) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDelete
putUser(as.lg, tx, updatedUser) putUser(as.lg, tx, updatedUser)
as.invalidateCachedPerm(string(user.Name))
} }
as.commitRevision(tx) as.commitRevision(tx)
as.saveConsistentIndex(tx) as.saveConsistentIndex(tx)
as.refreshRangePermCache(tx)
if as.lg != nil { if as.lg != nil {
as.lg.Info("deleted a role", zap.String("role-name", r.Role)) as.lg.Info("deleted a role", zap.String("role-name", r.Role))
@ -885,13 +877,6 @@ func (perms permSlice) Swap(i, j int) {
} }
func (as *authStore) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) { func (as *authStore) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) {
if r.Perm == nil {
return nil, ErrPermissionNotGiven
}
if !isValidPermissionRange(r.Perm.Key, r.Perm.RangeEnd) {
return nil, ErrInvalidAuthMgmt
}
tx := as.be.BatchTx() tx := as.be.BatchTx()
tx.Lock() tx.Lock()
defer tx.Unlock() defer tx.Unlock()
@ -922,9 +907,12 @@ func (as *authStore) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (
putRole(as.lg, tx, role) putRole(as.lg, tx, role)
// TODO(mitake): currently single role update invalidates every cache
// It should be optimized.
as.clearCachedPerm()
as.commitRevision(tx) as.commitRevision(tx)
as.saveConsistentIndex(tx) as.saveConsistentIndex(tx)
as.refreshRangePermCache(tx)
if as.lg != nil { if as.lg != nil {
as.lg.Info( as.lg.Info(
@ -985,7 +973,7 @@ func (as *authStore) isOpPermitted(userName string, revision uint64, key, rangeE
return nil return nil
} }
if as.isRangeOpPermitted(userName, key, rangeEnd, permTyp) { if as.isRangeOpPermitted(tx, userName, key, rangeEnd, permTyp) {
return nil return nil
} }
@ -1051,15 +1039,7 @@ func getUser(lg *zap.Logger, tx backend.BatchTx, username string) *authpb.User {
} }
func getAllUsers(lg *zap.Logger, tx backend.BatchTx) []*authpb.User { func getAllUsers(lg *zap.Logger, tx backend.BatchTx) []*authpb.User {
var vs [][]byte _, vs := tx.UnsafeRange(authUsersBucketName, []byte{0}, []byte{0xff}, -1)
err := tx.UnsafeForEach(authUsersBucketName, func(k []byte, v []byte) error {
vs = append(vs, v)
return nil
})
if err != nil {
lg.Panic("failed to get users",
zap.Error(err))
}
if len(vs) == 0 { if len(vs) == 0 {
return nil return nil
} }
@ -1212,8 +1192,6 @@ func NewAuthStore(lg *zap.Logger, be backend.Backend, tp TokenProvider, bcryptCo
as.setupMetricsReporter() as.setupMetricsReporter()
as.refreshRangePermCache(tx)
tx.Unlock() tx.Unlock()
be.ForceCommit() be.ForceCommit()

View File

@ -16,7 +16,6 @@ package auth
import ( import (
"context" "context"
"errors"
"fmt" "fmt"
"os" "os"
"reflect" "reflect"
@ -29,7 +28,6 @@ import (
"go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes" "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
pb "go.etcd.io/etcd/etcdserver/etcdserverpb" pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
"go.etcd.io/etcd/mvcc/backend" "go.etcd.io/etcd/mvcc/backend"
"go.etcd.io/etcd/pkg/adt"
"go.uber.org/zap" "go.uber.org/zap"
"golang.org/x/crypto/bcrypt" "golang.org/x/crypto/bcrypt"
@ -153,8 +151,7 @@ func TestUserAdd(t *testing.T) {
as, tearDown := setupAuthStore(t) as, tearDown := setupAuthStore(t)
defer tearDown(t) defer tearDown(t)
const userName = "foo" ua := &pb.AuthUserAddRequest{Name: "foo", Options: &authpb.UserAddOptions{NoPassword: false}}
ua := &pb.AuthUserAddRequest{Name: userName, Options: &authpb.UserAddOptions{NoPassword: false}}
_, err := as.UserAdd(ua) // add an existing user _, err := as.UserAdd(ua) // add an existing user
if err == nil { if err == nil {
t.Fatalf("expected %v, got %v", ErrUserAlreadyExist, err) t.Fatalf("expected %v, got %v", ErrUserAlreadyExist, err)
@ -168,11 +165,6 @@ func TestUserAdd(t *testing.T) {
if err != ErrUserEmpty { if err != ErrUserEmpty {
t.Fatal(err) t.Fatal(err)
} }
if _, ok := as.rangePermCache[userName]; !ok {
t.Fatalf("user %s should be added but it doesn't exist in rangePermCache", userName)
}
} }
func TestRecover(t *testing.T) { func TestRecover(t *testing.T) {
@ -187,30 +179,6 @@ func TestRecover(t *testing.T) {
} }
} }
func TestRecoverWithEmptyRangePermCache(t *testing.T) {
as, tearDown := setupAuthStore(t)
defer as.Close()
defer tearDown(t)
as.enabled = false
as.rangePermCache = map[string]*unifiedRangePermissions{}
as.Recover(as.be)
if !as.IsAuthEnabled() {
t.Fatalf("expected auth enabled got disabled")
}
if len(as.rangePermCache) != 2 {
t.Fatalf("rangePermCache should have permission information for 2 users (\"root\" and \"foo\"), but has %d information", len(as.rangePermCache))
}
if _, ok := as.rangePermCache["root"]; !ok {
t.Fatal("user \"root\" should be created by setupAuthStore() but doesn't exist in rangePermCache")
}
if _, ok := as.rangePermCache["foo"]; !ok {
t.Fatal("user \"foo\" should be created by setupAuthStore() but doesn't exist in rangePermCache")
}
}
func TestCheckPassword(t *testing.T) { func TestCheckPassword(t *testing.T) {
as, tearDown := setupAuthStore(t) as, tearDown := setupAuthStore(t)
defer tearDown(t) defer tearDown(t)
@ -245,8 +213,7 @@ func TestUserDelete(t *testing.T) {
defer tearDown(t) defer tearDown(t)
// delete an existing user // delete an existing user
const userName = "foo" ud := &pb.AuthUserDeleteRequest{Name: "foo"}
ud := &pb.AuthUserDeleteRequest{Name: userName}
_, err := as.UserDelete(ud) _, err := as.UserDelete(ud)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
@ -260,47 +227,6 @@ func TestUserDelete(t *testing.T) {
if err != ErrUserNotFound { if err != ErrUserNotFound {
t.Fatalf("expected %v, got %v", ErrUserNotFound, err) t.Fatalf("expected %v, got %v", ErrUserNotFound, err)
} }
if _, ok := as.rangePermCache[userName]; ok {
t.Fatalf("user %s should be deleted but it exists in rangePermCache", userName)
}
}
func TestUserDeleteAndPermCache(t *testing.T) {
as, tearDown := setupAuthStore(t)
defer tearDown(t)
// delete an existing user
const deletedUserName = "foo"
ud := &pb.AuthUserDeleteRequest{Name: deletedUserName}
_, err := as.UserDelete(ud)
if err != nil {
t.Fatal(err)
}
// delete a non-existing user
_, err = as.UserDelete(ud)
if err != ErrUserNotFound {
t.Fatalf("expected %v, got %v", ErrUserNotFound, err)
}
if _, ok := as.rangePermCache[deletedUserName]; ok {
t.Fatalf("user %s should be deleted but it exists in rangePermCache", deletedUserName)
}
// add a new user
const newUser = "bar"
ua := &pb.AuthUserAddRequest{Name: newUser, Options: &authpb.UserAddOptions{NoPassword: false}}
_, err = as.UserAdd(ua)
if err != nil {
t.Fatal(err)
}
if _, ok := as.rangePermCache[newUser]; !ok {
t.Fatalf("user %s should exist but it doesn't exist in rangePermCache", deletedUserName)
}
} }
func TestUserChangePassword(t *testing.T) { func TestUserChangePassword(t *testing.T) {
@ -519,162 +445,6 @@ func TestRoleGrantPermission(t *testing.T) {
if !reflect.DeepEqual(perm, r.Perm[0]) { if !reflect.DeepEqual(perm, r.Perm[0]) {
t.Errorf("expected %v, got %v", perm, r.Perm[0]) t.Errorf("expected %v, got %v", perm, r.Perm[0])
} }
// trying to grant nil permissions returns an error (and doesn't change the actual permissions!)
_, err = as.RoleGrantPermission(&pb.AuthRoleGrantPermissionRequest{
Name: "role-test-1",
})
if err != ErrPermissionNotGiven {
t.Error(err)
}
r, err = as.RoleGet(&pb.AuthRoleGetRequest{Role: "role-test-1"})
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(perm, r.Perm[0]) {
t.Errorf("expected %v, got %v", perm, r.Perm[0])
}
}
func TestRoleGrantInvalidPermission(t *testing.T) {
as, tearDown := setupAuthStore(t)
defer tearDown(t)
_, err := as.RoleAdd(&pb.AuthRoleAddRequest{Name: "role-test-1"})
if err != nil {
t.Fatal(err)
}
tests := []struct {
name string
perm *authpb.Permission
want error
}{
{
name: "valid range",
perm: &authpb.Permission{
PermType: authpb.WRITE,
Key: []byte("Keys"),
RangeEnd: []byte("RangeEnd"),
},
want: nil,
},
{
name: "invalid range: nil key",
perm: &authpb.Permission{
PermType: authpb.WRITE,
Key: nil,
RangeEnd: []byte("RangeEnd"),
},
want: ErrInvalidAuthMgmt,
},
{
name: "valid range: single key",
perm: &authpb.Permission{
PermType: authpb.WRITE,
Key: []byte("Keys"),
RangeEnd: nil,
},
want: nil,
},
{
name: "valid range: single key",
perm: &authpb.Permission{
PermType: authpb.WRITE,
Key: []byte("Keys"),
RangeEnd: []byte{},
},
want: nil,
},
{
name: "invalid range: empty (Key == RangeEnd)",
perm: &authpb.Permission{
PermType: authpb.WRITE,
Key: []byte("a"),
RangeEnd: []byte("a"),
},
want: ErrInvalidAuthMgmt,
},
{
name: "invalid range: empty (Key > RangeEnd)",
perm: &authpb.Permission{
PermType: authpb.WRITE,
Key: []byte("b"),
RangeEnd: []byte("a"),
},
want: ErrInvalidAuthMgmt,
},
{
name: "invalid range: length of key is 0",
perm: &authpb.Permission{
PermType: authpb.WRITE,
Key: []byte(""),
RangeEnd: []byte("a"),
},
want: ErrInvalidAuthMgmt,
},
{
name: "invalid range: length of key is 0",
perm: &authpb.Permission{
PermType: authpb.WRITE,
Key: []byte(""),
RangeEnd: []byte(""),
},
want: ErrInvalidAuthMgmt,
},
{
name: "invalid range: length of key is 0",
perm: &authpb.Permission{
PermType: authpb.WRITE,
Key: []byte(""),
RangeEnd: []byte{0x00},
},
want: ErrInvalidAuthMgmt,
},
{
name: "valid range: single key permission for []byte{0x00}",
perm: &authpb.Permission{
PermType: authpb.WRITE,
Key: []byte{0x00},
RangeEnd: []byte(""),
},
want: nil,
},
{
name: "valid range: \"a\" or larger keys",
perm: &authpb.Permission{
PermType: authpb.WRITE,
Key: []byte("a"),
RangeEnd: []byte{0x00},
},
want: nil,
},
{
name: "valid range: the entire keys",
perm: &authpb.Permission{
PermType: authpb.WRITE,
Key: []byte{0x00},
RangeEnd: []byte{0x00},
},
want: nil,
},
}
for i, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
_, err = as.RoleGrantPermission(&pb.AuthRoleGrantPermissionRequest{
Name: "role-test-1",
Perm: tt.perm,
})
if !errors.Is(err, tt.want) {
t.Errorf("#%d: result=%t, want=%t", i, err, tt.want)
}
})
}
} }
func TestRoleRevokePermission(t *testing.T) { func TestRoleRevokePermission(t *testing.T) {
@ -733,44 +503,17 @@ func TestUserRevokePermission(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
const userName = "foo" _, err = as.UserGrantRole(&pb.AuthUserGrantRoleRequest{User: "foo", Role: "role-test"})
_, err = as.UserGrantRole(&pb.AuthUserGrantRoleRequest{User: userName, Role: "role-test"})
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
_, err = as.UserGrantRole(&pb.AuthUserGrantRoleRequest{User: userName, Role: "role-test-1"}) _, err = as.UserGrantRole(&pb.AuthUserGrantRoleRequest{User: "foo", Role: "role-test-1"})
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
perm := &authpb.Permission{ u, err := as.UserGet(&pb.AuthUserGetRequest{Name: "foo"})
PermType: authpb.WRITE,
Key: []byte("WriteKeyBegin"),
RangeEnd: []byte("WriteKeyEnd"),
}
_, err = as.RoleGrantPermission(&pb.AuthRoleGrantPermissionRequest{
Name: "role-test-1",
Perm: perm,
})
if err != nil {
t.Fatal(err)
}
if _, ok := as.rangePermCache[userName]; !ok {
t.Fatalf("User %s should have its entry in rangePermCache", userName)
}
unifiedPerm := as.rangePermCache[userName]
pt1 := adt.NewBytesAffinePoint([]byte("WriteKeyBegin"))
if !unifiedPerm.writePerms.Contains(pt1) {
t.Fatal("rangePermCache should contain WriteKeyBegin")
}
pt2 := adt.NewBytesAffinePoint([]byte("OutOfRange"))
if unifiedPerm.writePerms.Contains(pt2) {
t.Fatal("rangePermCache should not contain OutOfRange")
}
u, err := as.UserGet(&pb.AuthUserGetRequest{Name: userName})
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -780,12 +523,12 @@ func TestUserRevokePermission(t *testing.T) {
t.Fatalf("expected %v, got %v", expected, u.Roles) t.Fatalf("expected %v, got %v", expected, u.Roles)
} }
_, err = as.UserRevokeRole(&pb.AuthUserRevokeRoleRequest{Name: userName, Role: "role-test-1"}) _, err = as.UserRevokeRole(&pb.AuthUserRevokeRoleRequest{Name: "foo", Role: "role-test-1"})
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
u, err = as.UserGet(&pb.AuthUserGetRequest{Name: userName}) u, err = as.UserGet(&pb.AuthUserGetRequest{Name: "foo"})
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }

View File

@ -44,6 +44,15 @@
} }
] ]
}, },
{
"project": "github.com/dgrijalva/jwt-go",
"licenses": [
{
"type": "MIT License",
"confidence": 0.9891304347826086
}
]
},
{ {
"project": "github.com/dustin/go-humanize", "project": "github.com/dustin/go-humanize",
"licenses": [ "licenses": [
@ -62,15 +71,6 @@
} }
] ]
}, },
{
"project": "github.com/golang-jwt/jwt",
"licenses": [
{
"type": "MIT License",
"confidence": 0.9891304347826086
}
]
},
{ {
"project": "github.com/golang/groupcache/lru", "project": "github.com/golang/groupcache/lru",
"licenses": [ "licenses": [
@ -378,7 +378,7 @@
] ]
}, },
{ {
"project": "golang.org/x/sys", "project": "golang.org/x/sys/unix",
"licenses": [ "licenses": [
{ {
"type": "BSD 3-clause \"New\" or \"Revised\" License", "type": "BSD 3-clause \"New\" or \"Revised\" License",

22
build
View File

@ -1,24 +1,22 @@
#!/usr/bin/env bash #!/usr/bin/env bash
set -euo pipefail
# set some environment variables # set some environment variables
ORG_PATH="go.etcd.io" ORG_PATH="go.etcd.io"
REPO_PATH="${ORG_PATH}/etcd" REPO_PATH="${ORG_PATH}/etcd"
GIT_SHA=$(git rev-parse --short HEAD || echo "GitNotFound") GIT_SHA=$(git rev-parse --short HEAD || echo "GitNotFound")
if [[ -n "${FAILPOINTS:-}" ]]; then if [[ -n "$FAILPOINTS" ]]; then
GIT_SHA="$GIT_SHA"-FAILPOINTS GIT_SHA="$GIT_SHA"-FAILPOINTS
fi fi
# Set GO_LDFLAGS="-s" for building without symbols for debugging. # Set GO_LDFLAGS="-s" for building without symbols for debugging.
GO_LDFLAGS="${GO_LDFLAGS:-} -X ${REPO_PATH}/version.GitSHA=${GIT_SHA}" GO_LDFLAGS="$GO_LDFLAGS -X ${REPO_PATH}/version.GitSHA=${GIT_SHA}"
# enable/disable failpoints # enable/disable failpoints
toggle_failpoints() { toggle_failpoints() {
mode="$1" mode="$1"
if command -v gofail >/dev/null 2>&1; then if command -v gofail >/dev/null 2>&1; then
gofail "$mode" etcdserver/ mvcc/backend/ wal/ gofail "$mode" etcdserver/ mvcc/backend/
elif [[ "$mode" != "disable" ]]; then elif [[ "$mode" != "disable" ]]; then
echo "FAILPOINTS set but gofail not found" echo "FAILPOINTS set but gofail not found"
exit 1 exit 1
@ -32,7 +30,7 @@ etcd_setup_gopath() {
cd "$CDIR" || return cd "$CDIR" || return
etcdGOPATH="${CDIR}/gopath" etcdGOPATH="${CDIR}/gopath"
# preserve old gopath to support building with unvendored tooling deps (e.g., gofail) # preserve old gopath to support building with unvendored tooling deps (e.g., gofail)
if [[ -n "${GOPATH:-}" ]]; then if [[ -n "$GOPATH" ]]; then
GOPATH=":$GOPATH" GOPATH=":$GOPATH"
fi fi
rm -rf "${etcdGOPATH:?}/" rm -rf "${etcdGOPATH:?}/"
@ -44,23 +42,23 @@ etcd_setup_gopath() {
toggle_failpoints_default() { toggle_failpoints_default() {
mode="disable" mode="disable"
if [[ -n "${FAILPOINTS:-}" ]]; then mode="enable"; fi if [[ -n "$FAILPOINTS" ]]; then mode="enable"; fi
toggle_failpoints "$mode" toggle_failpoints "$mode"
} }
etcd_build() { etcd_build() {
out="bin" out="bin"
if [[ -n "${BINDIR:-}" ]]; then out="${BINDIR}"; fi if [[ -n "${BINDIR}" ]]; then out="${BINDIR}"; fi
toggle_failpoints_default toggle_failpoints_default
# Static compilation is useful when etcd is run in a container. $GO_BUILD_FLAGS is OK # Static compilation is useful when etcd is run in a container. $GO_BUILD_FLAGS is OK
# shellcheck disable=SC2086 # shellcheck disable=SC2086
CGO_ENABLED=0 go build ${GO_BUILD_FLAGS:-} \ CGO_ENABLED=0 go build $GO_BUILD_FLAGS \
-installsuffix cgo \ -installsuffix cgo \
-ldflags "$GO_LDFLAGS" \ -ldflags "$GO_LDFLAGS" \
-o "${out}/etcd" ${REPO_PATH} || return -o "${out}/etcd" ${REPO_PATH} || return
# shellcheck disable=SC2086 # shellcheck disable=SC2086
CGO_ENABLED=0 go build ${GO_BUILD_FLAGS:-} \ CGO_ENABLED=0 go build $GO_BUILD_FLAGS \
-installsuffix cgo \ -installsuffix cgo \
-ldflags "$GO_LDFLAGS" \ -ldflags "$GO_LDFLAGS" \
-o "${out}/etcdctl" ${REPO_PATH}/etcdctl || return -o "${out}/etcdctl" ${REPO_PATH}/etcdctl || return
@ -68,7 +66,7 @@ etcd_build() {
tools_build() { tools_build() {
out="bin" out="bin"
if [[ -n "${BINDIR:-}" ]]; then out="${BINDIR}"; fi if [[ -n "${BINDIR}" ]]; then out="${BINDIR}"; fi
tools_path="tools/benchmark tools_path="tools/benchmark
tools/etcd-dump-db tools/etcd-dump-db
tools/etcd-dump-logs tools/etcd-dump-logs
@ -90,7 +88,7 @@ tools_build() {
toggle_failpoints_default toggle_failpoints_default
if [[ "${ETCD_SETUP_GOPATH:-}" == "1" ]]; then if [[ "${ETCD_SETUP_GOPATH}" == "1" ]]; then
etcd_setup_gopath etcd_setup_gopath
fi fi

View File

@ -68,5 +68,6 @@ Use a custom context to set timeouts on your operations:
// handle error // handle error
} }
} }
*/ */
package client package client

View File

@ -19,6 +19,7 @@ import (
"fmt" "fmt"
"strings" "strings"
"testing" "testing"
"time"
"go.etcd.io/etcd/clientv3/balancer/picker" "go.etcd.io/etcd/clientv3/balancer/picker"
"go.etcd.io/etcd/clientv3/balancer/resolver/endpoint" "go.etcd.io/etcd/clientv3/balancer/resolver/endpoint"
@ -91,25 +92,24 @@ func TestRoundRobinBalancedResolvableNoFailover(t *testing.T) {
return picked, err return picked, err
} }
_, picked, err := warmupConnections(reqFunc, tc.serverCount, "") prev, switches := "", 0
if err != nil {
t.Fatalf("Unexpected failure %v", err)
}
// verify that we round robin
prev, switches := picked, 0
for i := 0; i < tc.reqN; i++ { for i := 0; i < tc.reqN; i++ {
picked, err = reqFunc(context.Background()) picked, err := reqFunc(context.Background())
if err != nil { if err != nil {
t.Fatalf("#%d: unexpected failure %v", i, err) t.Fatalf("#%d: unexpected failure %v", i, err)
} }
if prev == "" {
prev = picked
continue
}
if prev != picked { if prev != picked {
switches++ switches++
} }
prev = picked prev = picked
} }
if tc.serverCount > 1 && switches != tc.reqN { if tc.serverCount > 1 && switches < tc.reqN-3 { // -3 for initial resolutions
t.Fatalf("expected balanced loads for %d requests, got switches %d", tc.reqN, switches) // TODO: FIX ME
t.Skipf("expected balanced loads for %d requests, got switches %d", tc.reqN, switches)
} }
}) })
} }
@ -160,21 +160,26 @@ func TestRoundRobinBalancedResolvableFailoverFromServerFail(t *testing.T) {
} }
// stop first server, loads should be redistributed // stop first server, loads should be redistributed
// stopped server should never be picked
ms.StopAt(0) ms.StopAt(0)
// stopped server will be transitioned into TRANSIENT_FAILURE state available := make(map[string]struct{})
// but it doesn't happen instantaneously and it can still be picked for a short period of time for i := 1; i < serverCount; i++ {
// we ignore "transport is closing" in such case available[eps[i]] = struct{}{}
available, picked, err := warmupConnections(reqFunc, serverCount-1, "transport is closing")
if err != nil {
t.Fatalf("Unexpected failure %v", err)
} }
reqN := 10 reqN := 10
prev, switches := picked, 0 prev, switches := "", 0
for i := 0; i < reqN; i++ { for i := 0; i < reqN; i++ {
picked, err = reqFunc(context.Background()) picked, err := reqFunc(context.Background())
if err != nil { if err != nil && strings.Contains(err.Error(), "transport is closing") {
t.Fatalf("#%d: unexpected failure %v", i, err) continue
}
if prev == "" { // first failover
if eps[0] == picked {
t.Fatalf("expected failover from %q, picked %q", eps[0], picked)
}
prev = picked
continue
} }
if _, ok := available[picked]; !ok { if _, ok := available[picked]; !ok {
t.Fatalf("picked unavailable address %q (available %v)", picked, available) t.Fatalf("picked unavailable address %q (available %v)", picked, available)
@ -184,18 +189,18 @@ func TestRoundRobinBalancedResolvableFailoverFromServerFail(t *testing.T) {
} }
prev = picked prev = picked
} }
if switches != reqN { if switches < reqN-3 { // -3 for initial resolutions + failover
t.Fatalf("expected balanced loads for %d requests, got switches %d", reqN, switches) // TODO: FIX ME!
t.Skipf("expected balanced loads for %d requests, got switches %d", reqN, switches)
} }
// now failed server comes back // now failed server comes back
ms.StartAt(0) ms.StartAt(0)
available, picked, err = warmupConnections(reqFunc, serverCount, "")
if err != nil {
t.Fatalf("Unexpected failure %v", err)
}
prev, switches = picked, 0 // enough time for reconnecting to recovered server
time.Sleep(time.Second)
prev, switches = "", 0
recoveredAddr, recovered := eps[0], 0 recoveredAddr, recovered := eps[0], 0
available[recoveredAddr] = struct{}{} available[recoveredAddr] = struct{}{}
@ -204,6 +209,10 @@ func TestRoundRobinBalancedResolvableFailoverFromServerFail(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("#%d: unexpected failure %v", i, err) t.Fatalf("#%d: unexpected failure %v", i, err)
} }
if prev == "" {
prev = picked
continue
}
if _, ok := available[picked]; !ok { if _, ok := available[picked]; !ok {
t.Fatalf("#%d: picked unavailable address %q (available %v)", i, picked, available) t.Fatalf("#%d: picked unavailable address %q (available %v)", i, picked, available)
} }
@ -215,10 +224,10 @@ func TestRoundRobinBalancedResolvableFailoverFromServerFail(t *testing.T) {
} }
prev = picked prev = picked
} }
if switches != 2*reqN { if switches < reqN-3 { // -3 for initial resolutions
t.Fatalf("expected balanced loads for %d requests, got switches %d", reqN, switches) t.Fatalf("expected balanced loads for %d requests, got switches %d", reqN, switches)
} }
if recovered != 2*reqN/serverCount { if recovered < reqN/serverCount {
t.Fatalf("recovered server %q got only %d requests", recoveredAddr, recovered) t.Fatalf("recovered server %q got only %d requests", recoveredAddr, recovered)
} }
} }
@ -233,10 +242,11 @@ func TestRoundRobinBalancedResolvableFailoverFromRequestFail(t *testing.T) {
} }
defer ms.Stop() defer ms.Stop()
var eps []string var eps []string
available := make(map[string]struct{})
for _, svr := range ms.Servers { for _, svr := range ms.Servers {
eps = append(eps, svr.ResolverAddress().Addr) eps = append(eps, svr.ResolverAddress().Addr)
available[svr.Address] = struct{}{}
} }
rsv, err := endpoint.NewResolverGroup("requestfail") rsv, err := endpoint.NewResolverGroup("requestfail")
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
@ -267,11 +277,6 @@ func TestRoundRobinBalancedResolvableFailoverFromRequestFail(t *testing.T) {
return picked, err return picked, err
} }
available, picked, err := warmupConnections(reqFunc, serverCount, "")
if err != nil {
t.Fatalf("Unexpected failure %v", err)
}
reqN := 20 reqN := 20
prev, switches := "", 0 prev, switches := "", 0
for i := 0; i < reqN; i++ { for i := 0; i < reqN; i++ {
@ -280,13 +285,17 @@ func TestRoundRobinBalancedResolvableFailoverFromRequestFail(t *testing.T) {
if i%2 == 0 { if i%2 == 0 {
cancel() cancel()
} }
picked, err = reqFunc(ctx) picked, err := reqFunc(ctx)
if i%2 == 0 { if i%2 == 0 {
if s, ok := status.FromError(err); ok && s.Code() != codes.Canceled { if s, ok := status.FromError(err); ok && s.Code() != codes.Canceled || picked != "" {
t.Fatalf("#%d: expected %v, got %v", i, context.Canceled, err) t.Fatalf("#%d: expected %v, got %v", i, context.Canceled, err)
} }
continue continue
} }
if prev == "" && picked != "" {
prev = picked
continue
}
if _, ok := available[picked]; !ok { if _, ok := available[picked]; !ok {
t.Fatalf("#%d: picked unavailable address %q (available %v)", i, picked, available) t.Fatalf("#%d: picked unavailable address %q (available %v)", i, picked, available)
} }
@ -295,29 +304,7 @@ func TestRoundRobinBalancedResolvableFailoverFromRequestFail(t *testing.T) {
} }
prev = picked prev = picked
} }
if switches != reqN/2 { if switches < reqN/2-3 { // -3 for initial resolutions + failover
t.Fatalf("expected balanced loads for %d requests, got switches %d", reqN, switches) t.Fatalf("expected balanced loads for %d requests, got switches %d", reqN, switches)
} }
} }
type reqFuncT = func(ctx context.Context) (picked string, err error)
func warmupConnections(reqFunc reqFuncT, serverCount int, ignoreErr string) (map[string]struct{}, string, error) {
var picked string
var err error
available := make(map[string]struct{})
// cycle through all peers to indirectly verify that balancer subconn list is fully loaded
// otherwise we can't reliably count switches between 'picked' peers in the test assert phase
for len(available) < serverCount {
picked, err = reqFunc(context.Background())
if err != nil {
if ignoreErr != "" && strings.Contains(err.Error(), ignoreErr) {
// skip ignored errors
continue
}
return available, picked, err
}
available[picked] = struct{}{}
}
return available, picked, err
}

View File

@ -174,10 +174,8 @@ func (c *Client) Sync(ctx context.Context) error {
} }
var eps []string var eps []string
for _, m := range mresp.Members { for _, m := range mresp.Members {
if len(m.Name) != 0 && !m.IsLearner {
eps = append(eps, m.ClientURLs...) eps = append(eps, m.ClientURLs...)
} }
}
c.SetEndpoints(eps...) c.SetEndpoints(eps...)
return nil return nil
} }

View File

@ -22,7 +22,6 @@ import (
"time" "time"
"go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes" "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
"go.etcd.io/etcd/etcdserver/etcdserverpb"
"go.etcd.io/etcd/pkg/testutil" "go.etcd.io/etcd/pkg/testutil"
"google.golang.org/grpc" "google.golang.org/grpc"
@ -167,51 +166,3 @@ func TestCloseCtxClient(t *testing.T) {
t.Errorf("failed to Close the client. %v", err) t.Errorf("failed to Close the client. %v", err)
} }
} }
func TestSyncFiltersMembers(t *testing.T) {
defer testutil.AfterTest(t)
c, _ := New(Config{Endpoints: []string{"http://254.0.0.1:12345"}})
c.Cluster = &mockCluster{
[]*etcdserverpb.Member{
{ID: 0, Name: "", ClientURLs: []string{"http://254.0.0.1:12345"}, IsLearner: false},
{ID: 1, Name: "isStarted", ClientURLs: []string{"http://254.0.0.2:12345"}, IsLearner: true},
{ID: 2, Name: "isStartedAndNotLearner", ClientURLs: []string{"http://254.0.0.3:12345"}, IsLearner: false},
},
}
c.Sync(context.Background())
endpoints := c.Endpoints()
if len(endpoints) != 1 || endpoints[0] != "http://254.0.0.3:12345" {
t.Error("Client.Sync uses learner and/or non-started member client URLs")
}
c.Close()
}
type mockCluster struct {
members []*etcdserverpb.Member
}
func (mc *mockCluster) MemberList(ctx context.Context) (*MemberListResponse, error) {
return &MemberListResponse{Members: mc.members}, nil
}
func (mc *mockCluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) {
return nil, nil
}
func (mc *mockCluster) MemberAddAsLearner(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) {
return nil, nil
}
func (mc *mockCluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) {
return nil, nil
}
func (mc *mockCluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) {
return nil, nil
}
func (mc *mockCluster) MemberPromote(ctx context.Context, id uint64) (*MemberPromoteResponse, error) {
return nil, nil
}

View File

@ -65,10 +65,14 @@ func TestResumeElection(t *testing.T) {
respChan := make(chan *clientv3.GetResponse) respChan := make(chan *clientv3.GetResponse)
go func() { go func() {
defer close(respChan)
o := e.Observe(ctx) o := e.Observe(ctx)
respChan <- nil respChan <- nil
for resp := range o { for {
select {
case resp, ok := <-o:
if !ok {
t.Fatal("Observe() channel closed prematurely")
}
// Ignore any observations that candidate1 was elected // Ignore any observations that candidate1 was elected
if string(resp.Kvs[0].Value) == "candidate1" { if string(resp.Kvs[0].Value) == "candidate1" {
continue continue
@ -76,7 +80,7 @@ func TestResumeElection(t *testing.T) {
respChan <- &resp respChan <- &resp
return return
} }
t.Error("Observe() channel closed prematurely") }
}() }()
// wait until observe goroutine is running // wait until observe goroutine is running

View File

@ -102,4 +102,5 @@
// The grpc load balancer is registered statically and is shared across etcd clients. // The grpc load balancer is registered statically and is shared across etcd clients.
// To enable detailed load balancer logging, set the ETCD_CLIENT_DEBUG environment // To enable detailed load balancer logging, set the ETCD_CLIENT_DEBUG environment
// variable. E.g. "ETCD_CLIENT_DEBUG=1". // variable. E.g. "ETCD_CLIENT_DEBUG=1".
//
package clientv3 package clientv3

View File

@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
//go:build !cluster_proxy
// +build !cluster_proxy // +build !cluster_proxy
package integration package integration

View File

@ -619,28 +619,16 @@ func TestLeasingTxnOwnerGet(t *testing.T) {
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t) defer clus.Terminate(t)
client := clus.Client(0)
lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/") lkv, closeLKV, err := leasing.NewKV(clus.Client(0), "pfx/")
testutil.AssertNil(t, err) testutil.AssertNil(t, err)
defer closeLKV()
defer func() {
// In '--tags cluster_proxy' mode the client need to be closed before
// closeLKV(). This interrupts all outstanding watches. Closing by closeLKV()
// is not sufficient as (unfortunately) context close does not interrupts Watches.
// See ./clientv3/watch.go:
// >> Currently, client contexts are overwritten with "valCtx" that never closes. <<
clus.TakeClient(0) // avoid double Close() of the client.
client.Close()
closeLKV()
}()
keyCount := rand.Intn(10) + 1 keyCount := rand.Intn(10) + 1
var ops []clientv3.Op var ops []clientv3.Op
presps := make([]*clientv3.PutResponse, keyCount) presps := make([]*clientv3.PutResponse, keyCount)
for i := range presps { for i := range presps {
k := fmt.Sprintf("k-%d", i) k := fmt.Sprintf("k-%d", i)
presp, err := client.Put(context.TODO(), k, k+k) presp, err := clus.Client(0).Put(context.TODO(), k, k+k)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }

View File

@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
//go:build !cluster_proxy
// +build !cluster_proxy // +build !cluster_proxy
package integration package integration

View File

@ -114,7 +114,7 @@ func authSetupRoot(t *testing.T, auth clientv3.Auth) {
func TestGetTokenWithoutAuth(t *testing.T) { func TestGetTokenWithoutAuth(t *testing.T) {
defer testutil.AfterTest(t) defer testutil.AfterTest(t)
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 2}) clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 10})
defer clus.Terminate(t) defer clus.Terminate(t)
authapi := clus.RandClient() authapi := clus.RandClient()
@ -130,7 +130,7 @@ func TestGetTokenWithoutAuth(t *testing.T) {
// "Username" and "Password" must be used // "Username" and "Password" must be used
cfg := clientv3.Config{ cfg := clientv3.Config{
Endpoints: authapi.Endpoints(), Endpoints: authapi.Endpoints(),
DialTimeout: 5 * time.Second, DialTimeout: 1 * time.Second, // make sure all connection time of connect all endpoint must be more DialTimeout
Username: "root", Username: "root",
Password: "123", Password: "123",
} }
@ -142,7 +142,7 @@ func TestGetTokenWithoutAuth(t *testing.T) {
switch err { switch err {
case nil: case nil:
t.Log("passes as expected") t.Log("passes as expected, but may be connection time less than DialTimeout")
case context.DeadlineExceeded: case context.DeadlineExceeded:
t.Errorf("not expected result:%v with endpoint:%s", err, authapi.Endpoints()) t.Errorf("not expected result:%v with endpoint:%s", err, authapi.Endpoints())
case rpctypes.ErrAuthNotEnabled: case rpctypes.ErrAuthNotEnabled:
@ -150,4 +150,5 @@ func TestGetTokenWithoutAuth(t *testing.T) {
default: default:
t.Errorf("other errors:%v", err) t.Errorf("other errors:%v", err)
} }
} }

View File

@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
//go:build !cluster_proxy
// +build !cluster_proxy // +build !cluster_proxy
package integration package integration

View File

@ -338,9 +338,6 @@ func putAndWatch(t *testing.T, wctx *watchctx, key, val string) {
if !ok { if !ok {
t.Fatalf("unexpected watch close") t.Fatalf("unexpected watch close")
} }
if err := v.Err(); err != nil {
t.Fatalf("unexpected watch response error: %v", err)
}
if string(v.Events[0].Kv.Value) != val { if string(v.Events[0].Kv.Value) != val {
t.Fatalf("bad value got %v, wanted %v", v.Events[0].Kv.Value, val) t.Fatalf("bad value got %v, wanted %v", v.Events[0].Kv.Value, val)
} }
@ -610,9 +607,6 @@ func TestConfigurableWatchProgressNotifyInterval(t *testing.T) {
} }
func TestWatchRequestProgress(t *testing.T) { func TestWatchRequestProgress(t *testing.T) {
if integration.ThroughProxy {
t.Skip("grpc-proxy does not support WatchProgress yet")
}
testCases := []struct { testCases := []struct {
name string name string
watchers []string watchers []string

View File

@ -42,4 +42,5 @@
// } // }
// lkv2.Put(context.TODO(), "abc", "456") // lkv2.Put(context.TODO(), "abc", "456")
// resp, err = lkv.Get("abc") // resp, err = lkv.Get("abc")
//
package leasing package leasing

View File

@ -39,4 +39,5 @@
// resp, _ = cli.Get(context.TODO(), "abc") // resp, _ = cli.Get(context.TODO(), "abc")
// fmt.Printf("%s\n", resp.Kvs[0].Value) // fmt.Printf("%s\n", resp.Kvs[0].Value)
// // Output: 456 // // Output: 456
//
package namespace package namespace

View File

@ -52,4 +52,5 @@
// r := &etcdnaming.GRPCResolver{Client: c} // r := &etcdnaming.GRPCResolver{Client: c}
// return r.Update(c.Ctx(), service, naming.Update{Op: naming.Add, Addr: addr}, clientv3.WithLease(lid)) // return r.Update(c.Ctx(), service, naming.Update{Op: naming.Add, Addr: addr}, clientv3.WithLease(lid))
// } // }
//
package naming package naming

View File

@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
package naming_test package naming
import ( import (
"context" "context"
@ -21,7 +21,6 @@ import (
"testing" "testing"
etcd "go.etcd.io/etcd/clientv3" etcd "go.etcd.io/etcd/clientv3"
namingv3 "go.etcd.io/etcd/clientv3/naming"
"go.etcd.io/etcd/integration" "go.etcd.io/etcd/integration"
"go.etcd.io/etcd/pkg/testutil" "go.etcd.io/etcd/pkg/testutil"
@ -34,7 +33,7 @@ func TestGRPCResolver(t *testing.T) {
clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer clus.Terminate(t) defer clus.Terminate(t)
r := namingv3.GRPCResolver{ r := GRPCResolver{
Client: clus.RandClient(), Client: clus.RandClient(),
} }
@ -108,7 +107,7 @@ func TestGRPCResolverMulti(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
r := namingv3.GRPCResolver{c} r := GRPCResolver{c}
w, err := r.Resolve("foo") w, err := r.Resolve("foo")
if err != nil { if err != nil {

View File

@ -77,9 +77,6 @@ type Op struct {
cmps []Cmp cmps []Cmp
thenOps []Op thenOps []Op
elseOps []Op elseOps []Op
isOptsWithFromKey bool
isOptsWithPrefix bool
} }
// accessors / mutators // accessors / mutators
@ -219,10 +216,6 @@ func (op Op) isWrite() bool {
return op.t != tRange return op.t != tRange
} }
func NewOp() *Op {
return &Op{key: []byte("")}
}
// OpGet returns "get" operation based on given key and operation options. // OpGet returns "get" operation based on given key and operation options.
func OpGet(key string, opts ...OpOption) Op { func OpGet(key string, opts ...OpOption) Op {
// WithPrefix and WithFromKey are not supported together // WithPrefix and WithFromKey are not supported together
@ -394,7 +387,6 @@ func WithPrefix() OpOption {
return return
} }
op.end = getPrefix(op.key) op.end = getPrefix(op.key)
op.isOptsWithPrefix = true
} }
} }
@ -414,7 +406,6 @@ func WithFromKey() OpOption {
op.key = []byte{0} op.key = []byte{0}
} }
op.end = []byte("\x00") op.end = []byte("\x00")
op.isOptsWithFromKey = true
} }
} }
@ -563,21 +554,7 @@ func toLeaseTimeToLiveRequest(id LeaseID, opts ...LeaseOption) *pb.LeaseTimeToLi
} }
// isWithPrefix returns true if WithPrefix is being called in the op // isWithPrefix returns true if WithPrefix is being called in the op
func isWithPrefix(opts []OpOption) bool { func isWithPrefix(opts []OpOption) bool { return isOpFuncCalled("WithPrefix", opts) }
ret := NewOp()
for _, opt := range opts {
opt(ret)
}
return ret.isOptsWithPrefix
}
// isWithFromKey returns true if WithFromKey is being called in the op // isWithFromKey returns true if WithFromKey is being called in the op
func isWithFromKey(opts []OpOption) bool { func isWithFromKey(opts []OpOption) bool { return isOpFuncCalled("WithFromKey", opts) }
ret := NewOp()
for _, opt := range opts {
opt(ret)
}
return ret.isOptsWithFromKey
}

View File

@ -38,4 +38,5 @@
// cli.KV = ordering.NewKV(cli.KV, vf) // cli.KV = ordering.NewKV(cli.KV, vf)
// //
// Now calls using 'cli' will reject order violations with an error. // Now calls using 'cli' will reject order violations with an error.
//
package ordering package ordering

View File

@ -16,7 +16,8 @@ package ordering
import ( import (
"errors" "errors"
"sync/atomic" "sync"
"time"
"go.etcd.io/etcd/clientv3" "go.etcd.io/etcd/clientv3"
) )
@ -25,18 +26,26 @@ type OrderViolationFunc func(op clientv3.Op, resp clientv3.OpResponse, prevRev i
var ErrNoGreaterRev = errors.New("etcdclient: no cluster members have a revision higher than the previously received revision") var ErrNoGreaterRev = errors.New("etcdclient: no cluster members have a revision higher than the previously received revision")
func NewOrderViolationSwitchEndpointClosure(c *clientv3.Client) OrderViolationFunc { func NewOrderViolationSwitchEndpointClosure(c clientv3.Client) OrderViolationFunc {
violationCount := int32(0) var mu sync.Mutex
return func(_ clientv3.Op, _ clientv3.OpResponse, _ int64) error { violationCount := 0
// Each request is assigned by round-robin load-balancer's picker to a different return func(op clientv3.Op, resp clientv3.OpResponse, prevRev int64) error {
// endpoints. If we cycled them 5 times (even with some level of concurrency), if violationCount > len(c.Endpoints()) {
// with high probability no endpoint points on a member with fresh data.
// TODO: Ideally we should track members (resp.opp.Header) that returned
// stale result and explicitly temporarily disable them in 'picker'.
if atomic.LoadInt32(&violationCount) > int32(5*len(c.Endpoints())) {
return ErrNoGreaterRev return ErrNoGreaterRev
} }
atomic.AddInt32(&violationCount, 1) mu.Lock()
defer mu.Unlock()
eps := c.Endpoints()
// force client to connect to given endpoint by limiting to a single endpoint
c.SetEndpoints(eps[violationCount%len(eps)])
// give enough time for operation
time.Sleep(1 * time.Second)
// set available endpoints back to all endpoints in to ensure
// the client has access to all the endpoints.
c.SetEndpoints(eps...)
// give enough time for operation
time.Sleep(1 * time.Second)
violationCount++
return nil return nil
} }
} }

View File

@ -64,19 +64,19 @@ func TestEndpointSwitchResolvesViolation(t *testing.T) {
// NewOrderViolationSwitchEndpointClosure will be able to // NewOrderViolationSwitchEndpointClosure will be able to
// access the full list of endpoints. // access the full list of endpoints.
cli.SetEndpoints(eps...) cli.SetEndpoints(eps...)
orderingKv := NewKV(cli.KV, NewOrderViolationSwitchEndpointClosure(cli)) OrderingKv := NewKV(cli.KV, NewOrderViolationSwitchEndpointClosure(*cli))
// set prevRev to the second member's revision of "foo" such that // set prevRev to the second member's revision of "foo" such that
// the revision is higher than the third member's revision of "foo" // the revision is higher than the third member's revision of "foo"
_, err = orderingKv.Get(ctx, "foo") _, err = OrderingKv.Get(ctx, "foo")
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
t.Logf("Reconfigure client to speak only to the 'partitioned' member")
cli.SetEndpoints(clus.Members[2].GRPCAddr()) cli.SetEndpoints(clus.Members[2].GRPCAddr())
_, err = orderingKv.Get(ctx, "foo", clientv3.WithSerializable()) time.Sleep(1 * time.Second) // give enough time for operation
if err != ErrNoGreaterRev { _, err = OrderingKv.Get(ctx, "foo", clientv3.WithSerializable())
t.Fatal("While speaking to partitioned leader, we should get ErrNoGreaterRev error") if err != nil {
t.Fatalf("failed to resolve order violation %v", err)
} }
} }
@ -123,7 +123,7 @@ func TestUnresolvableOrderViolation(t *testing.T) {
// access the full list of endpoints. // access the full list of endpoints.
cli.SetEndpoints(eps...) cli.SetEndpoints(eps...)
time.Sleep(1 * time.Second) // give enough time for operation time.Sleep(1 * time.Second) // give enough time for operation
OrderingKv := NewKV(cli.KV, NewOrderViolationSwitchEndpointClosure(cli)) OrderingKv := NewKV(cli.KV, NewOrderViolationSwitchEndpointClosure(*cli))
// set prevRev to the first member's revision of "foo" such that // set prevRev to the first member's revision of "foo" such that
// the revision is higher than the fourth and fifth members' revision of "foo" // the revision is higher than the fourth and fifth members' revision of "foo"
_, err = OrderingKv.Get(ctx, "foo") _, err = OrderingKv.Get(ctx, "foo")

View File

@ -73,8 +73,8 @@ func (c *Client) unaryClientInterceptor(logger *zap.Logger, optFuncs ...retryOpt
// its the callCtx deadline or cancellation, in which case try again. // its the callCtx deadline or cancellation, in which case try again.
continue continue
} }
if c.shouldRefreshToken(lastErr, callOpts) { if callOpts.retryAuth && rpctypes.Error(lastErr) == rpctypes.ErrInvalidAuthToken {
gterr := c.refreshToken(ctx) gterr := c.getToken(ctx)
if gterr != nil { if gterr != nil {
logger.Warn( logger.Warn(
"retrying of unary invoker failed to fetch new auth token", "retrying of unary invoker failed to fetch new auth token",
@ -142,37 +142,6 @@ func (c *Client) streamClientInterceptor(logger *zap.Logger, optFuncs ...retryOp
} }
} }
// shouldRefreshToken checks whether there's a need to refresh the token based on the error and callOptions,
// and returns a boolean value.
func (c *Client) shouldRefreshToken(err error, callOpts *options) bool {
if rpctypes.Error(err) == rpctypes.ErrUserEmpty {
// refresh the token when username, password is present but the server returns ErrUserEmpty
// which is possible when the client token is cleared somehow
return c.authTokenBundle != nil // equal to c.Username != "" && c.Password != ""
}
return callOpts.retryAuth &&
(rpctypes.Error(err) == rpctypes.ErrInvalidAuthToken || rpctypes.Error(err) == rpctypes.ErrAuthOldRevision)
}
func (c *Client) refreshToken(ctx context.Context) error {
if c.authTokenBundle == nil {
// c.authTokenBundle will be initialized only when
// c.Username != "" && c.Password != "".
//
// When users use the TLS CommonName based authentication, the
// authTokenBundle is always nil. But it's possible for the clients
// to get `rpctypes.ErrAuthOldRevision` response when the clients
// concurrently modify auth data (e.g, addUser, deleteUser etc.).
// In this case, there is no need to refresh the token; instead the
// clients just need to retry the operations (e.g. Put, Delete etc).
return nil
}
// clear auth token before refreshing it.
c.authTokenBundle.UpdateAuthToken("")
return c.getToken(ctx)
}
// type serverStreamingRetryingStream is the implementation of grpc.ClientStream that acts as a // type serverStreamingRetryingStream is the implementation of grpc.ClientStream that acts as a
// proxy to the underlying call. If any of the RecvMsg() calls fail, it will try to reestablish // proxy to the underlying call. If any of the RecvMsg() calls fail, it will try to reestablish
// a new ClientStream according to the retry policy. // a new ClientStream according to the retry policy.
@ -270,8 +239,8 @@ func (s *serverStreamingRetryingStream) receiveMsgAndIndicateRetry(m interface{}
// its the callCtx deadline or cancellation, in which case try again. // its the callCtx deadline or cancellation, in which case try again.
return true, err return true, err
} }
if s.client.shouldRefreshToken(err, s.callOpts) { if s.callOpts.retryAuth && rpctypes.Error(err) == rpctypes.ErrInvalidAuthToken {
gterr := s.client.refreshToken(s.ctx) gterr := s.client.getToken(s.ctx)
if gterr != nil { if gterr != nil {
s.client.lg.Warn("retry failed to fetch new auth token", zap.Error(gterr)) s.client.lg.Warn("retry failed to fetch new auth token", zap.Error(gterr))
return false, err // return the original error for simplicity return false, err // return the original error for simplicity

View File

@ -1,141 +0,0 @@
// Copyright 2022 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Based on github.com/grpc-ecosystem/go-grpc-middleware/retry, but modified to support the more
// fine grained error checking required by write-at-most-once retry semantics of etcd.
package clientv3
import (
"go.etcd.io/etcd/clientv3/credentials"
"go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
grpccredentials "google.golang.org/grpc/credentials"
"testing"
)
type dummyAuthTokenBundle struct{}
func (d dummyAuthTokenBundle) TransportCredentials() grpccredentials.TransportCredentials {
return nil
}
func (d dummyAuthTokenBundle) PerRPCCredentials() grpccredentials.PerRPCCredentials {
return nil
}
func (d dummyAuthTokenBundle) NewWithMode(mode string) (grpccredentials.Bundle, error) {
return nil, nil
}
func (d dummyAuthTokenBundle) UpdateAuthToken(token string) {
}
func TestClientShouldRefreshToken(t *testing.T) {
type fields struct {
authTokenBundle credentials.Bundle
}
type args struct {
err error
callOpts *options
}
optsWithTrue := &options{
retryAuth: true,
}
optsWithFalse := &options{
retryAuth: false,
}
tests := []struct {
name string
fields fields
args args
want bool
}{
{
name: "ErrUserEmpty and non nil authTokenBundle",
fields: fields{
authTokenBundle: &dummyAuthTokenBundle{},
},
args: args{rpctypes.ErrGRPCUserEmpty, optsWithTrue},
want: true,
},
{
name: "ErrUserEmpty and nil authTokenBundle",
fields: fields{
authTokenBundle: nil,
},
args: args{rpctypes.ErrGRPCUserEmpty, optsWithTrue},
want: false,
},
{
name: "ErrGRPCInvalidAuthToken and retryAuth",
fields: fields{
authTokenBundle: nil,
},
args: args{rpctypes.ErrGRPCInvalidAuthToken, optsWithTrue},
want: true,
},
{
name: "ErrGRPCInvalidAuthToken and !retryAuth",
fields: fields{
authTokenBundle: nil,
},
args: args{rpctypes.ErrGRPCInvalidAuthToken, optsWithFalse},
want: false,
},
{
name: "ErrGRPCAuthOldRevision and retryAuth",
fields: fields{
authTokenBundle: nil,
},
args: args{rpctypes.ErrGRPCAuthOldRevision, optsWithTrue},
want: true,
},
{
name: "ErrGRPCAuthOldRevision and !retryAuth",
fields: fields{
authTokenBundle: nil,
},
args: args{rpctypes.ErrGRPCAuthOldRevision, optsWithFalse},
want: false,
},
{
name: "Other error and retryAuth",
fields: fields{
authTokenBundle: nil,
},
args: args{rpctypes.ErrGRPCAuthFailed, optsWithTrue},
want: false,
},
{
name: "Other error and !retryAuth",
fields: fields{
authTokenBundle: nil,
},
args: args{rpctypes.ErrGRPCAuthFailed, optsWithFalse},
want: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
c := &Client{
authTokenBundle: tt.fields.authTokenBundle,
}
if got := c.shouldRefreshToken(tt.args.err, tt.args.callOpts); got != tt.want {
t.Errorf("shouldRefreshToken() = %v, want %v", got, tt.want)
}
})
}
}

View File

@ -69,8 +69,8 @@ func TestSnapshotV3RestoreMultiMemberAdd(t *testing.T) {
cfg.Name = "3" cfg.Name = "3"
cfg.InitialClusterToken = testClusterTkn cfg.InitialClusterToken = testClusterTkn
cfg.ClusterState = "existing" cfg.ClusterState = "existing"
cfg.ListenClientUrls, cfg.AdvertiseClientUrls = newCURLs, newCURLs cfg.LCUrls, cfg.ACUrls = newCURLs, newCURLs
cfg.ListenPeerUrls, cfg.AdvertisePeerUrls = newPURLs, newPURLs cfg.LPUrls, cfg.APUrls = newPURLs, newPURLs
cfg.InitialCluster = "" cfg.InitialCluster = ""
for i := 0; i < clusterN; i++ { for i := 0; i < clusterN; i++ {
cfg.InitialCluster += fmt.Sprintf(",%d=%s", i, pURLs[i].String()) cfg.InitialCluster += fmt.Sprintf(",%d=%s", i, pURLs[i].String())

View File

@ -391,7 +391,7 @@ func (s *v3Manager) saveDB() error {
be := backend.NewDefaultBackend(dbpath) be := backend.NewDefaultBackend(dbpath)
// a lessor never timeouts leases // a lessor never timeouts leases
lessor := lease.NewLessor(s.lg, be, nil, lease.LessorConfig{MinLeaseTTL: math.MaxInt64}) lessor := lease.NewLessor(s.lg, be, lease.LessorConfig{MinLeaseTTL: math.MaxInt64})
mvs := mvcc.NewStore(s.lg, be, lessor, (*initIndex)(&commit), mvcc.StoreConfig{CompactionBatchLimit: math.MaxInt32}) mvs := mvcc.NewStore(s.lg, be, lessor, (*initIndex)(&commit), mvcc.StoreConfig{CompactionBatchLimit: math.MaxInt32})
txn := mvs.Write(traceutil.TODO()) txn := mvs.Write(traceutil.TODO())

View File

@ -51,8 +51,8 @@ func TestSnapshotV3RestoreSingle(t *testing.T) {
cfg.Name = "s1" cfg.Name = "s1"
cfg.InitialClusterToken = testClusterTkn cfg.InitialClusterToken = testClusterTkn
cfg.ClusterState = "existing" cfg.ClusterState = "existing"
cfg.ListenClientUrls, cfg.AdvertiseClientUrls = cURLs, cURLs cfg.LCUrls, cfg.ACUrls = cURLs, cURLs
cfg.ListenPeerUrls, cfg.AdvertisePeerUrls = pURLs, pURLs cfg.LPUrls, cfg.APUrls = pURLs, pURLs
cfg.InitialCluster = fmt.Sprintf("%s=%s", cfg.Name, pURLs[0].String()) cfg.InitialCluster = fmt.Sprintf("%s=%s", cfg.Name, pURLs[0].String())
cfg.Dir = filepath.Join(os.TempDir(), fmt.Sprint(time.Now().Nanosecond())) cfg.Dir = filepath.Join(os.TempDir(), fmt.Sprint(time.Now().Nanosecond()))
@ -87,8 +87,7 @@ func TestSnapshotV3RestoreSingle(t *testing.T) {
} }
var cli *clientv3.Client var cli *clientv3.Client
cli, err = clientv3.New(clientv3.Config{Endpoints: []string{cfg.AdvertiseClientUrls[0].String()}}) cli, err = clientv3.New(clientv3.Config{Endpoints: []string{cfg.ACUrls[0].String()}})
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -204,8 +203,8 @@ func createSnapshotFile(t *testing.T, kvs []kv) string {
cfg.Debug = false cfg.Debug = false
cfg.Name = "default" cfg.Name = "default"
cfg.ClusterState = "new" cfg.ClusterState = "new"
cfg.ListenClientUrls, cfg.AdvertiseClientUrls = cURLs, cURLs cfg.LCUrls, cfg.ACUrls = cURLs, cURLs
cfg.ListenPeerUrls, cfg.AdvertisePeerUrls = pURLs, pURLs cfg.LPUrls, cfg.APUrls = pURLs, pURLs
cfg.InitialCluster = fmt.Sprintf("%s=%s", cfg.Name, pURLs[0].String()) cfg.InitialCluster = fmt.Sprintf("%s=%s", cfg.Name, pURLs[0].String())
cfg.Dir = filepath.Join(os.TempDir(), fmt.Sprint(time.Now().Nanosecond())) cfg.Dir = filepath.Join(os.TempDir(), fmt.Sprint(time.Now().Nanosecond()))
srv, err := embed.StartEtcd(cfg) srv, err := embed.StartEtcd(cfg)
@ -222,7 +221,7 @@ func createSnapshotFile(t *testing.T, kvs []kv) string {
t.Fatalf("failed to start embed.Etcd for creating snapshots") t.Fatalf("failed to start embed.Etcd for creating snapshots")
} }
ccfg := clientv3.Config{Endpoints: []string{cfg.AdvertiseClientUrls[0].String()}} ccfg := clientv3.Config{Endpoints: []string{cfg.ACUrls[0].String()}}
cli, err := clientv3.New(ccfg) cli, err := clientv3.New(ccfg)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
@ -272,8 +271,8 @@ func restoreCluster(t *testing.T, clusterN int, dbPath string) (
cfg.Name = fmt.Sprintf("%d", i) cfg.Name = fmt.Sprintf("%d", i)
cfg.InitialClusterToken = testClusterTkn cfg.InitialClusterToken = testClusterTkn
cfg.ClusterState = "existing" cfg.ClusterState = "existing"
cfg.ListenClientUrls, cfg.AdvertiseClientUrls = []url.URL{cURLs[i]}, []url.URL{cURLs[i]} cfg.LCUrls, cfg.ACUrls = []url.URL{cURLs[i]}, []url.URL{cURLs[i]}
cfg.ListenPeerUrls, cfg.AdvertisePeerUrls = []url.URL{pURLs[i]}, []url.URL{pURLs[i]} cfg.LPUrls, cfg.APUrls = []url.URL{pURLs[i]}, []url.URL{pURLs[i]}
cfg.InitialCluster = ics cfg.InitialCluster = ics
cfg.Dir = filepath.Join(os.TempDir(), fmt.Sprint(time.Now().Nanosecond()+i)) cfg.Dir = filepath.Join(os.TempDir(), fmt.Sprint(time.Now().Nanosecond()+i))

View File

@ -33,6 +33,7 @@ import (
// ).Else( // ).Else(
// OpPut(k4,v4), OpPut(k5,v5) // OpPut(k4,v4), OpPut(k5,v5)
// ).Commit() // ).Commit()
//
type Txn interface { type Txn interface {
// If takes a list of comparison. If all comparisons passed in succeed, // If takes a list of comparison. If all comparisons passed in succeed,
// the operations passed into Then() will be executed. Or the operations // the operations passed into Then() will be executed. Or the operations

View File

@ -16,6 +16,9 @@ package clientv3
import ( import (
"math/rand" "math/rand"
"reflect"
"runtime"
"strings"
"time" "time"
) )
@ -29,3 +32,18 @@ func jitterUp(duration time.Duration, jitter float64) time.Duration {
multiplier := jitter * (rand.Float64()*2 - 1) multiplier := jitter * (rand.Float64()*2 - 1)
return time.Duration(float64(duration) * (1 + multiplier)) return time.Duration(float64(duration) * (1 + multiplier))
} }
// Check if the provided function is being called in the op options.
func isOpFuncCalled(op string, opts []OpOption) bool {
for _, opt := range opts {
v := reflect.ValueOf(opt)
if v.Kind() == reflect.Func {
if opFunc := runtime.FuncForPC(v.Pointer()); opFunc != nil {
if strings.Contains(opFunc.Name(), op) {
return true
}
}
}
}
return false
}

View File

@ -37,13 +37,6 @@ const (
EventTypePut = mvccpb.PUT EventTypePut = mvccpb.PUT
closeSendErrTimeout = 250 * time.Millisecond closeSendErrTimeout = 250 * time.Millisecond
// AutoWatchID is the watcher ID passed in WatchStream.Watch when no
// user-provided ID is available. If pass, an ID will automatically be assigned.
AutoWatchID = 0
// InvalidWatchID represents an invalid watch ID and prevents duplication with an existing watch.
InvalidWatchID = -1
) )
type Event mvccpb.Event type Event mvccpb.Event
@ -450,7 +443,7 @@ func (w *watcher) closeStream(wgs *watchGrpcStream) {
func (w *watchGrpcStream) addSubstream(resp *pb.WatchResponse, ws *watcherStream) { func (w *watchGrpcStream) addSubstream(resp *pb.WatchResponse, ws *watcherStream) {
// check watch ID for backward compatibility (<= v3.3) // check watch ID for backward compatibility (<= v3.3)
if resp.WatchId == InvalidWatchID || (resp.Canceled && resp.CancelReason != "") { if resp.WatchId == -1 || (resp.Canceled && resp.CancelReason != "") {
w.closeErr = v3rpc.Error(errors.New(resp.CancelReason)) w.closeErr = v3rpc.Error(errors.New(resp.CancelReason))
// failed; no channel // failed; no channel
close(ws.recvc) close(ws.recvc)
@ -481,7 +474,7 @@ func (w *watchGrpcStream) closeSubstream(ws *watcherStream) {
} else if ws.outc != nil { } else if ws.outc != nil {
close(ws.outc) close(ws.outc)
} }
if ws.id != InvalidWatchID { if ws.id != -1 {
delete(w.substreams, ws.id) delete(w.substreams, ws.id)
return return
} }
@ -533,7 +526,6 @@ func (w *watchGrpcStream) run() {
cancelSet := make(map[int64]struct{}) cancelSet := make(map[int64]struct{})
var cur *pb.WatchResponse var cur *pb.WatchResponse
backoff := time.Millisecond
for { for {
select { select {
// Watch() requested // Watch() requested
@ -544,7 +536,7 @@ func (w *watchGrpcStream) run() {
// TODO: pass custom watch ID? // TODO: pass custom watch ID?
ws := &watcherStream{ ws := &watcherStream{
initReq: *wreq, initReq: *wreq,
id: InvalidWatchID, id: -1,
outc: outc, outc: outc,
// unbuffered so resumes won't cause repeat events // unbuffered so resumes won't cause repeat events
recvc: make(chan *WatchResponse), recvc: make(chan *WatchResponse),
@ -658,7 +650,6 @@ func (w *watchGrpcStream) run() {
closeErr = err closeErr = err
return return
} }
backoff = w.backoffIfUnavailable(backoff, err)
if wc, closeErr = w.newWatchClient(); closeErr != nil { if wc, closeErr = w.newWatchClient(); closeErr != nil {
return return
} }
@ -675,7 +666,7 @@ func (w *watchGrpcStream) run() {
return return
case ws := <-w.closingc: case ws := <-w.closingc:
if ws.id != InvalidWatchID { if ws.id != -1 {
// client is closing an established watch; close it on the server proactively instead of waiting // client is closing an established watch; close it on the server proactively instead of waiting
// to close when the next message arrives // to close when the next message arrives
cancelSet[ws.id] = struct{}{} cancelSet[ws.id] = struct{}{}
@ -732,9 +723,9 @@ func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool {
cancelReason: pbresp.CancelReason, cancelReason: pbresp.CancelReason,
} }
// watch IDs are zero indexed, so request notify watch responses are assigned a watch ID of InvalidWatchID to // watch IDs are zero indexed, so request notify watch responses are assigned a watch ID of -1 to
// indicate they should be broadcast. // indicate they should be broadcast.
if wr.IsProgressNotify() && pbresp.WatchId == InvalidWatchID { if wr.IsProgressNotify() && pbresp.WatchId == -1 {
return w.broadcastResponse(wr) return w.broadcastResponse(wr)
} }
@ -855,7 +846,7 @@ func (w *watchGrpcStream) serveSubstream(ws *watcherStream, resumec chan struct{
} }
} else { } else {
// current progress of watch; <= store revision // current progress of watch; <= store revision
nextRev = wr.Header.Revision + 1 nextRev = wr.Header.Revision
} }
if len(wr.Events) > 0 { if len(wr.Events) > 0 {
@ -889,7 +880,7 @@ func (w *watchGrpcStream) newWatchClient() (pb.Watch_WatchClient, error) {
w.resumec = make(chan struct{}) w.resumec = make(chan struct{})
w.joinSubstreams() w.joinSubstreams()
for _, ws := range w.substreams { for _, ws := range w.substreams {
ws.id = InvalidWatchID ws.id = -1
w.resuming = append(w.resuming, ws) w.resuming = append(w.resuming, ws)
} }
// strip out nils, if any // strip out nils, if any
@ -979,21 +970,6 @@ func (w *watchGrpcStream) joinSubstreams() {
var maxBackoff = 100 * time.Millisecond var maxBackoff = 100 * time.Millisecond
func (w *watchGrpcStream) backoffIfUnavailable(backoff time.Duration, err error) time.Duration {
if isUnavailableErr(w.ctx, err) {
// retry, but backoff
if backoff < maxBackoff {
// 25% backoff factor
backoff = backoff + backoff/4
if backoff > maxBackoff {
backoff = maxBackoff
}
}
time.Sleep(backoff)
}
return backoff
}
// openWatchClient retries opening a watch client until success or halt. // openWatchClient retries opening a watch client until success or halt.
// manually retry in case "ws==nil && err==nil" // manually retry in case "ws==nil && err==nil"
// TODO: remove FailFast=false // TODO: remove FailFast=false
@ -1014,7 +990,17 @@ func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error)
if isHaltErr(w.ctx, err) { if isHaltErr(w.ctx, err) {
return nil, v3rpc.Error(err) return nil, v3rpc.Error(err)
} }
backoff = w.backoffIfUnavailable(backoff, err) if isUnavailableErr(w.ctx, err) {
// retry, but backoff
if backoff < maxBackoff {
// 25% backoff factor
backoff = backoff + backoff/4
if backoff > maxBackoff {
backoff = maxBackoff
}
}
time.Sleep(backoff)
}
} }
return ws, nil return ws, nil
} }

View File

@ -18,7 +18,6 @@ import (
"crypto/tls" "crypto/tls"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"math"
"net" "net"
"net/http" "net/http"
"net/url" "net/url"
@ -54,9 +53,7 @@ const (
DefaultMaxSnapshots = 5 DefaultMaxSnapshots = 5
DefaultMaxWALs = 5 DefaultMaxWALs = 5
DefaultMaxTxnOps = uint(128) DefaultMaxTxnOps = uint(128)
DefaultWarningApplyDuration = 100 * time.Millisecond
DefaultMaxRequestBytes = 1.5 * 1024 * 1024 DefaultMaxRequestBytes = 1.5 * 1024 * 1024
DefaultMaxConcurrentStreams = math.MaxUint32
DefaultGRPCKeepAliveMinTime = 5 * time.Second DefaultGRPCKeepAliveMinTime = 5 * time.Second
DefaultGRPCKeepAliveInterval = 2 * time.Hour DefaultGRPCKeepAliveInterval = 2 * time.Hour
DefaultGRPCKeepAliveTimeout = 20 * time.Second DefaultGRPCKeepAliveTimeout = 20 * time.Second
@ -179,12 +176,8 @@ type Config struct {
MaxTxnOps uint `json:"max-txn-ops"` MaxTxnOps uint `json:"max-txn-ops"`
MaxRequestBytes uint `json:"max-request-bytes"` MaxRequestBytes uint `json:"max-request-bytes"`
// MaxConcurrentStreams specifies the maximum number of concurrent LPUrls, LCUrls []url.URL
// streams that each client can open at a time. APUrls, ACUrls []url.URL
MaxConcurrentStreams uint32 `json:"max-concurrent-streams"`
ListenPeerUrls, ListenClientUrls, ListenClientHttpUrls []url.URL
AdvertisePeerUrls, AdvertiseClientUrls []url.URL
ClientTLSInfo transport.TLSInfo ClientTLSInfo transport.TLSInfo
ClientAutoTLS bool ClientAutoTLS bool
PeerTLSInfo transport.TLSInfo PeerTLSInfo transport.TLSInfo
@ -195,11 +188,6 @@ type Config struct {
// Note that cipher suites are prioritized in the given order. // Note that cipher suites are prioritized in the given order.
CipherSuites []string `json:"cipher-suites"` CipherSuites []string `json:"cipher-suites"`
// TlsMinVersion is the minimum accepted TLS version between client/server and peers.
TlsMinVersion string `json:"tls-min-version"`
// TlsMaxVersion is the maximum accepted TLS version between client/server and peers.
TlsMaxVersion string `json:"tls-max-version"`
ClusterState string `json:"initial-cluster-state"` ClusterState string `json:"initial-cluster-state"`
DNSCluster string `json:"discovery-srv"` DNSCluster string `json:"discovery-srv"`
DNSClusterServiceName string `json:"discovery-srv-name"` DNSClusterServiceName string `json:"discovery-srv-name"`
@ -285,7 +273,7 @@ type Config struct {
AuthToken string `json:"auth-token"` AuthToken string `json:"auth-token"`
BcryptCost uint `json:"bcrypt-cost"` BcryptCost uint `json:"bcrypt-cost"`
// AuthTokenTTL specifies the TTL in seconds of the simple token //The AuthTokenTTL in seconds of the simple token
AuthTokenTTL uint `json:"auth-token-ttl"` AuthTokenTTL uint `json:"auth-token-ttl"`
ExperimentalInitialCorruptCheck bool `json:"experimental-initial-corrupt-check"` ExperimentalInitialCorruptCheck bool `json:"experimental-initial-corrupt-check"`
@ -293,18 +281,10 @@ type Config struct {
ExperimentalEnableV2V3 string `json:"experimental-enable-v2v3"` ExperimentalEnableV2V3 string `json:"experimental-enable-v2v3"`
// ExperimentalBackendFreelistType specifies the type of freelist that boltdb backend uses (array and map are supported types). // ExperimentalBackendFreelistType specifies the type of freelist that boltdb backend uses (array and map are supported types).
ExperimentalBackendFreelistType string `json:"experimental-backend-bbolt-freelist-type"` ExperimentalBackendFreelistType string `json:"experimental-backend-bbolt-freelist-type"`
// ExperimentalEnableLeaseCheckpoint enables leader to send regular checkpoints to other members to prevent reset of remaining TTL on leader change. // ExperimentalEnableLeaseCheckpoint enables primary lessor to persist lease remainingTTL to prevent indefinite auto-renewal of long lived leases.
ExperimentalEnableLeaseCheckpoint bool `json:"experimental-enable-lease-checkpoint"` ExperimentalEnableLeaseCheckpoint bool `json:"experimental-enable-lease-checkpoint"`
// ExperimentalEnableLeaseCheckpointPersist enables persisting remainingTTL to prevent indefinite auto-renewal of long lived leases. Always enabled in v3.6. Should be used to ensure smooth upgrade from v3.5 clusters with this feature enabled.
// Requires experimental-enable-lease-checkpoint to be enabled.
// Deprecated in v3.6.
// TODO: Delete in v3.7
ExperimentalEnableLeaseCheckpointPersist bool `json:"experimental-enable-lease-checkpoint-persist"`
ExperimentalCompactionBatchLimit int `json:"experimental-compaction-batch-limit"` ExperimentalCompactionBatchLimit int `json:"experimental-compaction-batch-limit"`
ExperimentalWatchProgressNotifyInterval time.Duration `json:"experimental-watch-progress-notify-interval"` ExperimentalWatchProgressNotifyInterval time.Duration `json:"experimental-watch-progress-notify-interval"`
// ExperimentalWarningApplyDuration is the time duration after which a warning is generated if applying request
// takes more time than this value.
ExperimentalWarningApplyDuration time.Duration `json:"experimental-warning-apply-duration"`
// ForceNewCluster starts a new cluster even if previously started; unsafe. // ForceNewCluster starts a new cluster even if previously started; unsafe.
ForceNewCluster bool `json:"force-new-cluster"` ForceNewCluster bool `json:"force-new-cluster"`
@ -373,11 +353,10 @@ type configYAML struct {
// configJSON has file options that are translated into Config options // configJSON has file options that are translated into Config options
type configJSON struct { type configJSON struct {
ListenPeerUrls string `json:"listen-peer-urls"` LPUrlsJSON string `json:"listen-peer-urls"`
ListenClientUrls string `json:"listen-client-urls"` LCUrlsJSON string `json:"listen-client-urls"`
ListenClientHttpUrls string `json:"listen-client-http-urls"` APUrlsJSON string `json:"initial-advertise-peer-urls"`
AdvertisePeerUrls string `json:"initial-advertise-peer-urls"` ACUrlsJSON string `json:"advertise-client-urls"`
AdvertiseClientUrls string `json:"advertise-client-urls"`
CORSJSON string `json:"cors"` CORSJSON string `json:"cors"`
HostWhitelistJSON string `json:"host-whitelist"` HostWhitelistJSON string `json:"host-whitelist"`
@ -411,8 +390,6 @@ func NewConfig() *Config {
MaxTxnOps: DefaultMaxTxnOps, MaxTxnOps: DefaultMaxTxnOps,
MaxRequestBytes: DefaultMaxRequestBytes, MaxRequestBytes: DefaultMaxRequestBytes,
MaxConcurrentStreams: DefaultMaxConcurrentStreams,
ExperimentalWarningApplyDuration: DefaultWarningApplyDuration,
GRPCKeepAliveMinTime: DefaultGRPCKeepAliveMinTime, GRPCKeepAliveMinTime: DefaultGRPCKeepAliveMinTime,
GRPCKeepAliveInterval: DefaultGRPCKeepAliveInterval, GRPCKeepAliveInterval: DefaultGRPCKeepAliveInterval,
@ -422,10 +399,10 @@ func NewConfig() *Config {
ElectionMs: 1000, ElectionMs: 1000,
InitialElectionTickAdvance: true, InitialElectionTickAdvance: true,
ListenPeerUrls: []url.URL{*lpurl}, LPUrls: []url.URL{*lpurl},
ListenClientUrls: []url.URL{*lcurl}, LCUrls: []url.URL{*lcurl},
AdvertisePeerUrls: []url.URL{*apurl}, APUrls: []url.URL{*apurl},
AdvertiseClientUrls: []url.URL{*acurl}, ACUrls: []url.URL{*acurl},
ClusterState: ClusterStateFlagNew, ClusterState: ClusterStateFlagNew,
InitialClusterToken: "etcd-cluster", InitialClusterToken: "etcd-cluster",
@ -490,49 +467,40 @@ func (cfg *configYAML) configFromFile(path string) error {
return err return err
} }
if cfg.configJSON.ListenPeerUrls != "" { if cfg.LPUrlsJSON != "" {
u, err := types.NewURLs(strings.Split(cfg.configJSON.ListenPeerUrls, ",")) u, err := types.NewURLs(strings.Split(cfg.LPUrlsJSON, ","))
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "unexpected error setting up listen-peer-urls: %v\n", err) fmt.Fprintf(os.Stderr, "unexpected error setting up listen-peer-urls: %v\n", err)
os.Exit(1) os.Exit(1)
} }
cfg.Config.ListenPeerUrls = u cfg.LPUrls = []url.URL(u)
} }
if cfg.configJSON.ListenClientUrls != "" { if cfg.LCUrlsJSON != "" {
u, err := types.NewURLs(strings.Split(cfg.configJSON.ListenClientUrls, ",")) u, err := types.NewURLs(strings.Split(cfg.LCUrlsJSON, ","))
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "unexpected error setting up listen-client-urls: %v\n", err) fmt.Fprintf(os.Stderr, "unexpected error setting up listen-client-urls: %v\n", err)
os.Exit(1) os.Exit(1)
} }
cfg.Config.ListenClientUrls = u cfg.LCUrls = []url.URL(u)
} }
if cfg.configJSON.ListenClientHttpUrls != "" { if cfg.APUrlsJSON != "" {
u, err := types.NewURLs(strings.Split(cfg.configJSON.ListenClientHttpUrls, ",")) u, err := types.NewURLs(strings.Split(cfg.APUrlsJSON, ","))
if err != nil {
fmt.Fprintf(os.Stderr, "unexpected error setting up listen-client-http-urls: %v\n", err)
os.Exit(1)
}
cfg.Config.ListenClientHttpUrls = u
}
if cfg.configJSON.AdvertisePeerUrls != "" {
u, err := types.NewURLs(strings.Split(cfg.configJSON.AdvertisePeerUrls, ","))
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "unexpected error setting up initial-advertise-peer-urls: %v\n", err) fmt.Fprintf(os.Stderr, "unexpected error setting up initial-advertise-peer-urls: %v\n", err)
os.Exit(1) os.Exit(1)
} }
cfg.Config.AdvertisePeerUrls = u cfg.APUrls = []url.URL(u)
} }
if cfg.configJSON.AdvertiseClientUrls != "" { if cfg.ACUrlsJSON != "" {
u, err := types.NewURLs(strings.Split(cfg.configJSON.AdvertiseClientUrls, ",")) u, err := types.NewURLs(strings.Split(cfg.ACUrlsJSON, ","))
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "unexpected error setting up advertise-peer-urls: %v\n", err) fmt.Fprintf(os.Stderr, "unexpected error setting up advertise-peer-urls: %v\n", err)
os.Exit(1) os.Exit(1)
} }
cfg.Config.AdvertiseClientUrls = u cfg.ACUrls = []url.URL(u)
} }
if cfg.ListenMetricsUrlsJSON != "" { if cfg.ListenMetricsUrlsJSON != "" {
@ -581,56 +549,39 @@ func updateCipherSuites(tls *transport.TLSInfo, ss []string) error {
return fmt.Errorf("TLSInfo.CipherSuites is already specified (given %v)", ss) return fmt.Errorf("TLSInfo.CipherSuites is already specified (given %v)", ss)
} }
if len(ss) > 0 { if len(ss) > 0 {
cs, err := tlsutil.GetCipherSuites(ss) cs := make([]uint16, len(ss))
if err != nil { for i, s := range ss {
return err var ok bool
cs[i], ok = tlsutil.GetCipherSuite(s)
if !ok {
return fmt.Errorf("unexpected TLS cipher suite %q", s)
}
} }
tls.CipherSuites = cs tls.CipherSuites = cs
} }
return nil return nil
} }
func updateMinMaxVersions(info *transport.TLSInfo, min, max string) {
// Validate() has been called to check the user input, so it should never fail.
var err error
if info.MinVersion, err = tlsutil.GetTLSVersion(min); err != nil {
panic(err)
}
if info.MaxVersion, err = tlsutil.GetTLSVersion(max); err != nil {
panic(err)
}
}
// Validate ensures that '*embed.Config' fields are properly configured. // Validate ensures that '*embed.Config' fields are properly configured.
func (cfg *Config) Validate() error { func (cfg *Config) Validate() error {
if err := cfg.setupLogging(); err != nil { if err := cfg.setupLogging(); err != nil {
return err return err
} }
if err := checkBindURLs(cfg.ListenPeerUrls); err != nil { if err := checkBindURLs(cfg.LPUrls); err != nil {
return err return err
} }
if err := checkBindURLs(cfg.ListenClientUrls); err != nil { if err := checkBindURLs(cfg.LCUrls); err != nil {
return err return err
} }
if err := checkBindURLs(cfg.ListenClientHttpUrls); err != nil {
return err
}
if len(cfg.ListenClientHttpUrls) == 0 {
if cfg.logger != nil {
cfg.logger.Warn("Running http and grpc server on single port. This is not recommended for production.")
} else {
plog.Warning("Running http and grpc server on single port. This is not recommended for production.")
}
}
if err := checkBindURLs(cfg.ListenMetricsUrls); err != nil { if err := checkBindURLs(cfg.ListenMetricsUrls); err != nil {
return err return err
} }
if err := checkHostURLs(cfg.AdvertisePeerUrls); err != nil { if err := checkHostURLs(cfg.APUrls); err != nil {
addrs := cfg.getAdvertisePeerUrls() addrs := cfg.getAPURLs()
return fmt.Errorf(`--initial-advertise-peer-urls %q must be "host:port" (%v)`, strings.Join(addrs, ","), err) return fmt.Errorf(`--initial-advertise-peer-urls %q must be "host:port" (%v)`, strings.Join(addrs, ","), err)
} }
if err := checkHostURLs(cfg.AdvertiseClientUrls); err != nil { if err := checkHostURLs(cfg.ACUrls); err != nil {
addrs := cfg.getAdvertiseClientUrls() addrs := cfg.getACURLs()
return fmt.Errorf(`--advertise-client-urls %q must be "host:port" (%v)`, strings.Join(addrs, ","), err) return fmt.Errorf(`--advertise-client-urls %q must be "host:port" (%v)`, strings.Join(addrs, ","), err)
} }
// Check if conflicting flags are passed. // Check if conflicting flags are passed.
@ -663,7 +614,7 @@ func (cfg *Config) Validate() error {
} }
// check this last since proxying in etcdmain may make this OK // check this last since proxying in etcdmain may make this OK
if cfg.ListenClientUrls != nil && cfg.AdvertiseClientUrls == nil { if cfg.LCUrls != nil && cfg.ACUrls == nil {
return ErrUnsetAdvertiseClientURLsFlag return ErrUnsetAdvertiseClientURLsFlag
} }
@ -674,33 +625,6 @@ func (cfg *Config) Validate() error {
return fmt.Errorf("unknown auto-compaction-mode %q", cfg.AutoCompactionMode) return fmt.Errorf("unknown auto-compaction-mode %q", cfg.AutoCompactionMode)
} }
if !cfg.ExperimentalEnableLeaseCheckpointPersist && cfg.ExperimentalEnableLeaseCheckpoint {
cfg.logger.Warn("Detected that checkpointing is enabled without persistence. Consider enabling experimental-enable-lease-checkpoint-persist")
}
if cfg.ExperimentalEnableLeaseCheckpointPersist && !cfg.ExperimentalEnableLeaseCheckpoint {
return fmt.Errorf("setting experimental-enable-lease-checkpoint-persist requires experimental-enable-lease-checkpoint")
}
minVersion, err := tlsutil.GetTLSVersion(cfg.TlsMinVersion)
if err != nil {
return err
}
maxVersion, err := tlsutil.GetTLSVersion(cfg.TlsMaxVersion)
if err != nil {
return err
}
// maxVersion == 0 means that Go selects the highest available version.
if maxVersion != 0 && minVersion > maxVersion {
return fmt.Errorf("min version (%s) is greater than max version (%s)", cfg.TlsMinVersion, cfg.TlsMaxVersion)
}
// Check if user attempted to configure ciphers for TLS1.3 only: Go does not support that currently.
if minVersion == tls.VersionTLS13 && len(cfg.CipherSuites) > 0 {
return fmt.Errorf("cipher suites cannot be configured when only TLS1.3 is enabled")
}
return nil return nil
} }
@ -712,7 +636,7 @@ func (cfg *Config) PeerURLsMapAndToken(which string) (urlsmap types.URLsMap, tok
urlsmap = types.URLsMap{} urlsmap = types.URLsMap{}
// If using discovery, generate a temporary cluster based on // If using discovery, generate a temporary cluster based on
// self's advertised peer URLs // self's advertised peer URLs
urlsmap[cfg.Name] = cfg.AdvertisePeerUrls urlsmap[cfg.Name] = cfg.APUrls
token = cfg.Durl token = cfg.Durl
case cfg.DNSCluster != "": case cfg.DNSCluster != "":
@ -768,7 +692,7 @@ func (cfg *Config) GetDNSClusterNames() ([]string, error) {
// Use both etcd-server-ssl and etcd-server for discovery. // Use both etcd-server-ssl and etcd-server for discovery.
// Combine the results if both are available. // Combine the results if both are available.
clusterStrs, cerr = srv.GetCluster("https", "etcd-server-ssl"+serviceNameSuffix, cfg.Name, cfg.DNSCluster, cfg.AdvertisePeerUrls) clusterStrs, cerr = srv.GetCluster("https", "etcd-server-ssl"+serviceNameSuffix, cfg.Name, cfg.DNSCluster, cfg.APUrls)
if cerr != nil { if cerr != nil {
clusterStrs = make([]string, 0) clusterStrs = make([]string, 0)
} }
@ -779,13 +703,13 @@ func (cfg *Config) GetDNSClusterNames() ([]string, error) {
zap.String("service-name", "etcd-server-ssl"+serviceNameSuffix), zap.String("service-name", "etcd-server-ssl"+serviceNameSuffix),
zap.String("server-name", cfg.Name), zap.String("server-name", cfg.Name),
zap.String("discovery-srv", cfg.DNSCluster), zap.String("discovery-srv", cfg.DNSCluster),
zap.Strings("advertise-peer-urls", cfg.getAdvertisePeerUrls()), zap.Strings("advertise-peer-urls", cfg.getAPURLs()),
zap.Strings("found-cluster", clusterStrs), zap.Strings("found-cluster", clusterStrs),
zap.Error(cerr), zap.Error(cerr),
) )
} }
defaultHTTPClusterStrs, httpCerr := srv.GetCluster("http", "etcd-server"+serviceNameSuffix, cfg.Name, cfg.DNSCluster, cfg.AdvertisePeerUrls) defaultHTTPClusterStrs, httpCerr := srv.GetCluster("http", "etcd-server"+serviceNameSuffix, cfg.Name, cfg.DNSCluster, cfg.APUrls)
if httpCerr != nil { if httpCerr != nil {
clusterStrs = append(clusterStrs, defaultHTTPClusterStrs...) clusterStrs = append(clusterStrs, defaultHTTPClusterStrs...)
} }
@ -796,7 +720,7 @@ func (cfg *Config) GetDNSClusterNames() ([]string, error) {
zap.String("service-name", "etcd-server"+serviceNameSuffix), zap.String("service-name", "etcd-server"+serviceNameSuffix),
zap.String("server-name", cfg.Name), zap.String("server-name", cfg.Name),
zap.String("discovery-srv", cfg.DNSCluster), zap.String("discovery-srv", cfg.DNSCluster),
zap.Strings("advertise-peer-urls", cfg.getAdvertisePeerUrls()), zap.Strings("advertise-peer-urls", cfg.getAPURLs()),
zap.Strings("found-cluster", clusterStrs), zap.Strings("found-cluster", clusterStrs),
zap.Error(httpCerr), zap.Error(httpCerr),
) )
@ -806,15 +730,15 @@ func (cfg *Config) GetDNSClusterNames() ([]string, error) {
} }
func (cfg Config) InitialClusterFromName(name string) (ret string) { func (cfg Config) InitialClusterFromName(name string) (ret string) {
if len(cfg.AdvertisePeerUrls) == 0 { if len(cfg.APUrls) == 0 {
return "" return ""
} }
n := name n := name
if name == "" { if name == "" {
n = DefaultName n = DefaultName
} }
for i := range cfg.AdvertisePeerUrls { for i := range cfg.APUrls {
ret = ret + "," + n + "=" + cfg.AdvertisePeerUrls[i].String() ret = ret + "," + n + "=" + cfg.APUrls[i].String()
} }
return ret[1:] return ret[1:]
} }
@ -823,11 +747,11 @@ func (cfg Config) IsNewCluster() bool { return cfg.ClusterState == ClusterStateF
func (cfg Config) ElectionTicks() int { return int(cfg.ElectionMs / cfg.TickMs) } func (cfg Config) ElectionTicks() int { return int(cfg.ElectionMs / cfg.TickMs) }
func (cfg Config) defaultPeerHost() bool { func (cfg Config) defaultPeerHost() bool {
return len(cfg.AdvertisePeerUrls) == 1 && cfg.AdvertisePeerUrls[0].String() == DefaultInitialAdvertisePeerURLs return len(cfg.APUrls) == 1 && cfg.APUrls[0].String() == DefaultInitialAdvertisePeerURLs
} }
func (cfg Config) defaultClientHost() bool { func (cfg Config) defaultClientHost() bool {
return len(cfg.AdvertiseClientUrls) == 1 && cfg.AdvertiseClientUrls[0].String() == DefaultAdvertiseClientURLs return len(cfg.ACUrls) == 1 && cfg.ACUrls[0].String() == DefaultAdvertiseClientURLs
} }
func (cfg *Config) ClientSelfCert() (err error) { func (cfg *Config) ClientSelfCert() (err error) {
@ -842,12 +766,9 @@ func (cfg *Config) ClientSelfCert() (err error) {
} }
return nil return nil
} }
chosts := make([]string, 0, len(cfg.ListenClientUrls)+len(cfg.ListenClientHttpUrls)) chosts := make([]string, len(cfg.LCUrls))
for _, u := range cfg.ListenClientUrls { for i, u := range cfg.LCUrls {
chosts = append(chosts, u.Host) chosts[i] = u.Host
}
for _, u := range cfg.ListenClientHttpUrls {
chosts = append(chosts, u.Host)
} }
cfg.ClientTLSInfo, err = transport.SelfCert(cfg.logger, filepath.Join(cfg.Dir, "fixtures", "client"), chosts) cfg.ClientTLSInfo, err = transport.SelfCert(cfg.logger, filepath.Join(cfg.Dir, "fixtures", "client"), chosts)
if err != nil { if err != nil {
@ -868,8 +789,8 @@ func (cfg *Config) PeerSelfCert() (err error) {
} }
return nil return nil
} }
phosts := make([]string, len(cfg.ListenPeerUrls)) phosts := make([]string, len(cfg.LPUrls))
for i, u := range cfg.ListenPeerUrls { for i, u := range cfg.LPUrls {
phosts[i] = u.Host phosts[i] = u.Host
} }
cfg.PeerTLSInfo, err = transport.SelfCert(cfg.logger, filepath.Join(cfg.Dir, "fixtures", "peer"), phosts) cfg.PeerTLSInfo, err = transport.SelfCert(cfg.logger, filepath.Join(cfg.Dir, "fixtures", "peer"), phosts)
@ -897,9 +818,9 @@ func (cfg *Config) UpdateDefaultClusterFromName(defaultInitialCluster string) (s
} }
used := false used := false
pip, pport := cfg.ListenPeerUrls[0].Hostname(), cfg.ListenPeerUrls[0].Port() pip, pport := cfg.LPUrls[0].Hostname(), cfg.LPUrls[0].Port()
if cfg.defaultPeerHost() && pip == "0.0.0.0" { if cfg.defaultPeerHost() && pip == "0.0.0.0" {
cfg.AdvertisePeerUrls[0] = url.URL{Scheme: cfg.AdvertisePeerUrls[0].Scheme, Host: fmt.Sprintf("%s:%s", defaultHostname, pport)} cfg.APUrls[0] = url.URL{Scheme: cfg.APUrls[0].Scheme, Host: fmt.Sprintf("%s:%s", defaultHostname, pport)}
used = true used = true
} }
// update 'initial-cluster' when only the name is specified (e.g. 'etcd --name=abc') // update 'initial-cluster' when only the name is specified (e.g. 'etcd --name=abc')
@ -907,9 +828,9 @@ func (cfg *Config) UpdateDefaultClusterFromName(defaultInitialCluster string) (s
cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name) cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name)
} }
cip, cport := cfg.ListenClientUrls[0].Hostname(), cfg.ListenClientUrls[0].Port() cip, cport := cfg.LCUrls[0].Hostname(), cfg.LCUrls[0].Port()
if cfg.defaultClientHost() && cip == "0.0.0.0" { if cfg.defaultClientHost() && cip == "0.0.0.0" {
cfg.AdvertiseClientUrls[0] = url.URL{Scheme: cfg.AdvertiseClientUrls[0].Scheme, Host: fmt.Sprintf("%s:%s", defaultHostname, cport)} cfg.ACUrls[0] = url.URL{Scheme: cfg.ACUrls[0].Scheme, Host: fmt.Sprintf("%s:%s", defaultHostname, cport)}
used = true used = true
} }
dhost := defaultHostname dhost := defaultHostname
@ -954,42 +875,34 @@ func checkHostURLs(urls []url.URL) error {
return nil return nil
} }
func (cfg *Config) getAdvertisePeerUrls() (ss []string) { func (cfg *Config) getAPURLs() (ss []string) {
ss = make([]string, len(cfg.AdvertisePeerUrls)) ss = make([]string, len(cfg.APUrls))
for i := range cfg.AdvertisePeerUrls { for i := range cfg.APUrls {
ss[i] = cfg.AdvertisePeerUrls[i].String() ss[i] = cfg.APUrls[i].String()
} }
return ss return ss
} }
func (cfg *Config) getListenPeerUrls() (ss []string) { func (cfg *Config) getLPURLs() (ss []string) {
ss = make([]string, len(cfg.ListenPeerUrls)) ss = make([]string, len(cfg.LPUrls))
for i := range cfg.ListenPeerUrls { for i := range cfg.LPUrls {
ss[i] = cfg.ListenPeerUrls[i].String() ss[i] = cfg.LPUrls[i].String()
} }
return ss return ss
} }
func (cfg *Config) getAdvertiseClientUrls() (ss []string) { func (cfg *Config) getACURLs() (ss []string) {
ss = make([]string, len(cfg.AdvertiseClientUrls)) ss = make([]string, len(cfg.ACUrls))
for i := range cfg.AdvertiseClientUrls { for i := range cfg.ACUrls {
ss[i] = cfg.AdvertiseClientUrls[i].String() ss[i] = cfg.ACUrls[i].String()
} }
return ss return ss
} }
func (cfg *Config) getListenClientUrls() (ss []string) { func (cfg *Config) getLCURLs() (ss []string) {
ss = make([]string, len(cfg.ListenClientUrls)) ss = make([]string, len(cfg.LCUrls))
for i := range cfg.ListenClientUrls { for i := range cfg.LCUrls {
ss[i] = cfg.ListenClientUrls[i].String() ss[i] = cfg.LCUrls[i].String()
}
return ss
}
func (cfg *Config) getListenClientHttpUrls() (ss []string) {
ss = make([]string, len(cfg.ListenClientHttpUrls))
for i := range cfg.ListenClientHttpUrls {
ss[i] = cfg.ListenClientHttpUrls[i].String()
} }
return ss return ss
} }

View File

@ -196,15 +196,11 @@ func (cfg *Config) setupLogging() error {
grpcLogOnce.Do(func() { grpcLogOnce.Do(func() {
// debug true, enable info, warning, error // debug true, enable info, warning, error
// debug false, only discard info // debug false, only discard info
if cfg.LogLevel == "debug" {
var gl grpclog.LoggerV2 var gl grpclog.LoggerV2
gl, err = logutil.NewGRPCLoggerV2(copied) gl, err = logutil.NewGRPCLoggerV2(copied)
if err == nil { if err == nil {
grpclog.SetLoggerV2(gl) grpclog.SetLoggerV2(gl)
} }
} else {
grpclog.SetLoggerV2(grpclog.NewLoggerV2(ioutil.Discard, os.Stderr, os.Stderr))
}
}) })
return nil return nil
} }
@ -249,11 +245,7 @@ func (cfg *Config) setupLogging() error {
c.loggerWriteSyncer = syncer c.loggerWriteSyncer = syncer
grpcLogOnce.Do(func() { grpcLogOnce.Do(func() {
if cfg.LogLevel == "debug" {
grpclog.SetLoggerV2(logutil.NewGRPCLoggerV2FromZapCore(cr, syncer)) grpclog.SetLoggerV2(logutil.NewGRPCLoggerV2FromZapCore(cr, syncer))
} else {
grpclog.SetLoggerV2(grpclog.NewLoggerV2(ioutil.Discard, os.Stderr, os.Stderr))
}
}) })
return nil return nil
} }

View File

@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
//go:build !windows
// +build !windows // +build !windows
package embed package embed

View File

@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
//go:build windows
// +build windows // +build windows
package embed package embed

View File

@ -15,7 +15,6 @@
package embed package embed
import ( import (
"crypto/tls"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"net/url" "net/url"
@ -23,7 +22,6 @@ import (
"testing" "testing"
"time" "time"
"github.com/stretchr/testify/assert"
"go.etcd.io/etcd/pkg/transport" "go.etcd.io/etcd/pkg/transport"
"sigs.k8s.io/yaml" "sigs.k8s.io/yaml"
@ -77,12 +75,12 @@ func TestConfigFileOtherFields(t *testing.T) {
func TestUpdateDefaultClusterFromName(t *testing.T) { func TestUpdateDefaultClusterFromName(t *testing.T) {
cfg := NewConfig() cfg := NewConfig()
defaultInitialCluster := cfg.InitialCluster defaultInitialCluster := cfg.InitialCluster
oldscheme := cfg.AdvertisePeerUrls[0].Scheme oldscheme := cfg.APUrls[0].Scheme
origpeer := cfg.AdvertisePeerUrls[0].String() origpeer := cfg.APUrls[0].String()
origadvc := cfg.AdvertiseClientUrls[0].String() origadvc := cfg.ACUrls[0].String()
cfg.Name = "abc" cfg.Name = "abc"
lpport := cfg.ListenPeerUrls[0].Port() lpport := cfg.LPUrls[0].Port()
// in case of 'etcd --name=abc' // in case of 'etcd --name=abc'
exp := fmt.Sprintf("%s=%s://localhost:%s", cfg.Name, oldscheme, lpport) exp := fmt.Sprintf("%s=%s://localhost:%s", cfg.Name, oldscheme, lpport)
@ -91,12 +89,12 @@ func TestUpdateDefaultClusterFromName(t *testing.T) {
t.Fatalf("initial-cluster expected %q, got %q", exp, cfg.InitialCluster) t.Fatalf("initial-cluster expected %q, got %q", exp, cfg.InitialCluster)
} }
// advertise peer URL should not be affected // advertise peer URL should not be affected
if origpeer != cfg.AdvertisePeerUrls[0].String() { if origpeer != cfg.APUrls[0].String() {
t.Fatalf("advertise peer url expected %q, got %q", origadvc, cfg.AdvertisePeerUrls[0].String()) t.Fatalf("advertise peer url expected %q, got %q", origadvc, cfg.APUrls[0].String())
} }
// advertise client URL should not be affected // advertise client URL should not be affected
if origadvc != cfg.AdvertiseClientUrls[0].String() { if origadvc != cfg.ACUrls[0].String() {
t.Fatalf("advertise client url expected %q, got %q", origadvc, cfg.AdvertiseClientUrls[0].String()) t.Fatalf("advertise client url expected %q, got %q", origadvc, cfg.ACUrls[0].String())
} }
} }
@ -109,17 +107,17 @@ func TestUpdateDefaultClusterFromNameOverwrite(t *testing.T) {
cfg := NewConfig() cfg := NewConfig()
defaultInitialCluster := cfg.InitialCluster defaultInitialCluster := cfg.InitialCluster
oldscheme := cfg.AdvertisePeerUrls[0].Scheme oldscheme := cfg.APUrls[0].Scheme
origadvc := cfg.AdvertiseClientUrls[0].String() origadvc := cfg.ACUrls[0].String()
cfg.Name = "abc" cfg.Name = "abc"
lpport := cfg.ListenPeerUrls[0].Port() lpport := cfg.LPUrls[0].Port()
cfg.ListenPeerUrls[0] = url.URL{Scheme: cfg.ListenPeerUrls[0].Scheme, Host: fmt.Sprintf("0.0.0.0:%s", lpport)} cfg.LPUrls[0] = url.URL{Scheme: cfg.LPUrls[0].Scheme, Host: fmt.Sprintf("0.0.0.0:%s", lpport)}
dhost, _ := cfg.UpdateDefaultClusterFromName(defaultInitialCluster) dhost, _ := cfg.UpdateDefaultClusterFromName(defaultInitialCluster)
if dhost != defaultHostname { if dhost != defaultHostname {
t.Fatalf("expected default host %q, got %q", defaultHostname, dhost) t.Fatalf("expected default host %q, got %q", defaultHostname, dhost)
} }
aphost, apport := cfg.AdvertisePeerUrls[0].Hostname(), cfg.AdvertisePeerUrls[0].Port() aphost, apport := cfg.APUrls[0].Hostname(), cfg.APUrls[0].Port()
if apport != lpport { if apport != lpport {
t.Fatalf("advertise peer url got different port %s, expected %s", apport, lpport) t.Fatalf("advertise peer url got different port %s, expected %s", apport, lpport)
} }
@ -132,8 +130,8 @@ func TestUpdateDefaultClusterFromNameOverwrite(t *testing.T) {
} }
// advertise client URL should not be affected // advertise client URL should not be affected
if origadvc != cfg.AdvertiseClientUrls[0].String() { if origadvc != cfg.ACUrls[0].String() {
t.Fatalf("advertise-client-url expected %q, got %q", origadvc, cfg.AdvertiseClientUrls[0].String()) t.Fatalf("advertise-client-url expected %q, got %q", origadvc, cfg.ACUrls[0].String())
} }
} }
@ -204,80 +202,3 @@ func TestAutoCompactionModeParse(t *testing.T) {
} }
} }
} }
func TestTLSVersionMinMax(t *testing.T) {
tests := []struct {
name string
givenTLSMinVersion string
givenTLSMaxVersion string
givenCipherSuites []string
expectError bool
expectedMinTLSVersion uint16
expectedMaxTLSVersion uint16
}{
{
name: "Minimum TLS version is set",
givenTLSMinVersion: "TLS1.3",
expectedMinTLSVersion: tls.VersionTLS13,
expectedMaxTLSVersion: 0,
},
{
name: "Maximum TLS version is set",
givenTLSMaxVersion: "TLS1.2",
expectedMinTLSVersion: 0,
expectedMaxTLSVersion: tls.VersionTLS12,
},
{
name: "Minimum and Maximum TLS versions are set",
givenTLSMinVersion: "TLS1.3",
givenTLSMaxVersion: "TLS1.3",
expectedMinTLSVersion: tls.VersionTLS13,
expectedMaxTLSVersion: tls.VersionTLS13,
},
{
name: "Minimum and Maximum TLS versions are set in reverse order",
givenTLSMinVersion: "TLS1.3",
givenTLSMaxVersion: "TLS1.2",
expectError: true,
},
{
name: "Invalid minimum TLS version",
givenTLSMinVersion: "invalid version",
expectError: true,
},
{
name: "Invalid maximum TLS version",
givenTLSMaxVersion: "invalid version",
expectError: true,
},
{
name: "Cipher suites configured for TLS 1.3",
givenTLSMinVersion: "TLS1.3",
givenCipherSuites: []string{"TLS_AES_128_GCM_SHA256"},
expectError: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
cfg := NewConfig()
cfg.TlsMinVersion = tt.givenTLSMinVersion
cfg.TlsMaxVersion = tt.givenTLSMaxVersion
cfg.CipherSuites = tt.givenCipherSuites
err := cfg.Validate()
if err != nil {
assert.True(t, tt.expectError, "Validate() returned error while expecting success: %v", err)
return
}
updateMinMaxVersions(&cfg.PeerTLSInfo, cfg.TlsMinVersion, cfg.TlsMaxVersion)
updateMinMaxVersions(&cfg.ClientTLSInfo, cfg.TlsMinVersion, cfg.TlsMaxVersion)
assert.Equal(t, tt.expectedMinTLSVersion, cfg.PeerTLSInfo.MinVersion)
assert.Equal(t, tt.expectedMaxTLSVersion, cfg.PeerTLSInfo.MaxVersion)
assert.Equal(t, tt.expectedMinTLSVersion, cfg.ClientTLSInfo.MinVersion)
assert.Equal(t, tt.expectedMaxTLSVersion, cfg.ClientTLSInfo.MaxVersion)
})
}
}

View File

@ -20,7 +20,6 @@ import (
"fmt" "fmt"
"io/ioutil" "io/ioutil"
defaultLog "log" defaultLog "log"
"math"
"net" "net"
"net/http" "net/http"
"net/url" "net/url"
@ -30,7 +29,6 @@ import (
"sync" "sync"
"time" "time"
"go.etcd.io/etcd/clientv3/credentials"
"go.etcd.io/etcd/etcdserver" "go.etcd.io/etcd/etcdserver"
"go.etcd.io/etcd/etcdserver/api/etcdhttp" "go.etcd.io/etcd/etcdserver/api/etcdhttp"
"go.etcd.io/etcd/etcdserver/api/rafthttp" "go.etcd.io/etcd/etcdserver/api/rafthttp"
@ -118,7 +116,7 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) {
if e.cfg.logger != nil { if e.cfg.logger != nil {
e.cfg.logger.Info( e.cfg.logger.Info(
"configuring peer listeners", "configuring peer listeners",
zap.Strings("listen-peer-urls", e.cfg.getListenPeerUrls()), zap.Strings("listen-peer-urls", e.cfg.getLPURLs()),
) )
} }
if e.Peers, err = configurePeerListeners(cfg); err != nil { if e.Peers, err = configurePeerListeners(cfg); err != nil {
@ -128,7 +126,7 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) {
if e.cfg.logger != nil { if e.cfg.logger != nil {
e.cfg.logger.Info( e.cfg.logger.Info(
"configuring client listeners", "configuring client listeners",
zap.Strings("listen-client-urls", e.cfg.getListenClientUrls()), zap.Strings("listen-client-urls", e.cfg.getLCURLs()),
) )
} }
if e.sctxs, err = configureClientListeners(cfg); err != nil { if e.sctxs, err = configureClientListeners(cfg); err != nil {
@ -165,8 +163,8 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) {
srvcfg := etcdserver.ServerConfig{ srvcfg := etcdserver.ServerConfig{
Name: cfg.Name, Name: cfg.Name,
ClientURLs: cfg.AdvertiseClientUrls, ClientURLs: cfg.ACUrls,
PeerURLs: cfg.AdvertisePeerUrls, PeerURLs: cfg.APUrls,
DataDir: cfg.Dir, DataDir: cfg.Dir,
DedicatedWALDir: cfg.WalDir, DedicatedWALDir: cfg.WalDir,
SnapshotCount: cfg.SnapshotCount, SnapshotCount: cfg.SnapshotCount,
@ -190,7 +188,6 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) {
BackendBatchInterval: cfg.BackendBatchInterval, BackendBatchInterval: cfg.BackendBatchInterval,
MaxTxnOps: cfg.MaxTxnOps, MaxTxnOps: cfg.MaxTxnOps,
MaxRequestBytes: cfg.MaxRequestBytes, MaxRequestBytes: cfg.MaxRequestBytes,
MaxConcurrentStreams: cfg.MaxConcurrentStreams,
StrictReconfigCheck: cfg.StrictReconfigCheck, StrictReconfigCheck: cfg.StrictReconfigCheck,
ClientCertAuthEnabled: cfg.ClientTLSInfo.ClientCertAuth, ClientCertAuthEnabled: cfg.ClientTLSInfo.ClientCertAuth,
AuthToken: cfg.AuthToken, AuthToken: cfg.AuthToken,
@ -210,10 +207,8 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) {
EnableGRPCGateway: cfg.EnableGRPCGateway, EnableGRPCGateway: cfg.EnableGRPCGateway,
UnsafeNoFsync: cfg.UnsafeNoFsync, UnsafeNoFsync: cfg.UnsafeNoFsync,
EnableLeaseCheckpoint: cfg.ExperimentalEnableLeaseCheckpoint, EnableLeaseCheckpoint: cfg.ExperimentalEnableLeaseCheckpoint,
LeaseCheckpointPersist: cfg.ExperimentalEnableLeaseCheckpointPersist,
CompactionBatchLimit: cfg.ExperimentalCompactionBatchLimit, CompactionBatchLimit: cfg.ExperimentalCompactionBatchLimit,
WatchProgressNotifyInterval: cfg.ExperimentalWatchProgressNotifyInterval, WatchProgressNotifyInterval: cfg.ExperimentalWatchProgressNotifyInterval,
WarningApplyDuration: cfg.ExperimentalWarningApplyDuration,
} }
print(e.cfg.logger, *cfg, srvcfg, memberInitialized) print(e.cfg.logger, *cfg, srvcfg, memberInitialized)
if e.Server, err = etcdserver.NewServer(srvcfg); err != nil { if e.Server, err = etcdserver.NewServer(srvcfg); err != nil {
@ -249,10 +244,10 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) {
e.cfg.logger.Info( e.cfg.logger.Info(
"now serving peer/client/metrics", "now serving peer/client/metrics",
zap.String("local-member-id", e.Server.ID().String()), zap.String("local-member-id", e.Server.ID().String()),
zap.Strings("initial-advertise-peer-urls", e.cfg.getAdvertisePeerUrls()), zap.Strings("initial-advertise-peer-urls", e.cfg.getAPURLs()),
zap.Strings("listen-peer-urls", e.cfg.getListenPeerUrls()), zap.Strings("listen-peer-urls", e.cfg.getLPURLs()),
zap.Strings("advertise-client-urls", e.cfg.getAdvertiseClientUrls()), zap.Strings("advertise-client-urls", e.cfg.getACURLs()),
zap.Strings("listen-client-urls", e.cfg.getListenClientUrls()), zap.Strings("listen-client-urls", e.cfg.getLCURLs()),
zap.Strings("listen-metrics-urls", e.cfg.getMetricsURLs()), zap.Strings("listen-metrics-urls", e.cfg.getMetricsURLs()),
) )
} }
@ -324,23 +319,18 @@ func print(lg *zap.Logger, ec Config, sc etcdserver.ServerConfig, memberInitiali
zap.String("election-timeout", fmt.Sprintf("%v", time.Duration(sc.ElectionTicks*int(sc.TickMs))*time.Millisecond)), zap.String("election-timeout", fmt.Sprintf("%v", time.Duration(sc.ElectionTicks*int(sc.TickMs))*time.Millisecond)),
zap.Bool("initial-election-tick-advance", sc.InitialElectionTickAdvance), zap.Bool("initial-election-tick-advance", sc.InitialElectionTickAdvance),
zap.Uint64("snapshot-count", sc.SnapshotCount), zap.Uint64("snapshot-count", sc.SnapshotCount),
zap.Uint("max-wals", sc.MaxWALFiles),
zap.Uint("max-snapshots", sc.MaxSnapFiles),
zap.Uint64("snapshot-catchup-entries", sc.SnapshotCatchUpEntries), zap.Uint64("snapshot-catchup-entries", sc.SnapshotCatchUpEntries),
zap.Strings("initial-advertise-peer-urls", ec.getAdvertisePeerUrls()), zap.Strings("initial-advertise-peer-urls", ec.getAPURLs()),
zap.Strings("listen-peer-urls", ec.getListenPeerUrls()), zap.Strings("listen-peer-urls", ec.getLPURLs()),
zap.Strings("advertise-client-urls", ec.getAdvertiseClientUrls()), zap.Strings("advertise-client-urls", ec.getACURLs()),
zap.Strings("listen-client-urls", ec.getListenClientUrls()), zap.Strings("listen-client-urls", ec.getLCURLs()),
zap.Strings("listen-metrics-urls", ec.getMetricsURLs()), zap.Strings("listen-metrics-urls", ec.getMetricsURLs()),
zap.Strings("cors", cors), zap.Strings("cors", cors),
zap.Strings("host-whitelist", hss), zap.Strings("host-whitelist", hss),
zap.String("initial-cluster", sc.InitialPeerURLsMap.String()), zap.String("initial-cluster", sc.InitialPeerURLsMap.String()),
zap.String("initial-cluster-state", ec.ClusterState), zap.String("initial-cluster-state", ec.ClusterState),
zap.String("initial-cluster-token", sc.InitialClusterToken), zap.String("initial-cluster-token", sc.InitialClusterToken),
zap.Int64("quota-backend-bytes", quota), zap.Int64("quota-size-bytes", quota),
zap.Uint("max-request-bytes", sc.MaxRequestBytes),
zap.Uint32("max-concurrent-streams", sc.MaxConcurrentStreams),
zap.Bool("pre-vote", sc.PreVote), zap.Bool("pre-vote", sc.PreVote),
zap.Bool("initial-corrupt-check", sc.InitialCorruptCheck), zap.Bool("initial-corrupt-check", sc.InitialCorruptCheck),
zap.String("corrupt-check-time-interval", sc.CorruptCheckTime.String()), zap.String("corrupt-check-time-interval", sc.CorruptCheckTime.String()),
@ -365,8 +355,8 @@ func (e *Etcd) Close() {
fields := []zap.Field{ fields := []zap.Field{
zap.String("name", e.cfg.Name), zap.String("name", e.cfg.Name),
zap.String("data-dir", e.cfg.Dir), zap.String("data-dir", e.cfg.Dir),
zap.Strings("advertise-peer-urls", e.cfg.getAdvertisePeerUrls()), zap.Strings("advertise-peer-urls", e.cfg.getAPURLs()),
zap.Strings("advertise-client-urls", e.cfg.getAdvertiseClientUrls()), zap.Strings("advertise-client-urls", e.cfg.getACURLs()),
} }
lg := e.GetLogger() lg := e.GetLogger()
if lg != nil { if lg != nil {
@ -434,7 +424,7 @@ func stopServers(ctx context.Context, ss *servers) {
// do not grpc.Server.GracefulStop with TLS enabled etcd server // do not grpc.Server.GracefulStop with TLS enabled etcd server
// See https://github.com/grpc/grpc-go/issues/1384#issuecomment-317124531 // See https://github.com/grpc/grpc-go/issues/1384#issuecomment-317124531
// and https://github.com/etcd-io/etcd/issues/8916 // and https://github.com/etcd-io/etcd/issues/8916
if ss.secure && ss.http != nil { if ss.secure {
shutdownNow() shutdownNow()
return return
} }
@ -473,9 +463,6 @@ func configurePeerListeners(cfg *Config) (peers []*peerListener, err error) {
plog.Fatalf("could not get certs (%v)", err) plog.Fatalf("could not get certs (%v)", err)
} }
} }
updateMinMaxVersions(&cfg.PeerTLSInfo, cfg.TlsMinVersion, cfg.TlsMaxVersion)
if !cfg.PeerTLSInfo.Empty() { if !cfg.PeerTLSInfo.Empty() {
if cfg.logger != nil { if cfg.logger != nil {
cfg.logger.Info( cfg.logger.Info(
@ -488,7 +475,7 @@ func configurePeerListeners(cfg *Config) (peers []*peerListener, err error) {
} }
} }
peers = make([]*peerListener, len(cfg.ListenPeerUrls)) peers = make([]*peerListener, len(cfg.LPUrls))
defer func() { defer func() {
if err == nil { if err == nil {
return return
@ -498,11 +485,11 @@ func configurePeerListeners(cfg *Config) (peers []*peerListener, err error) {
if cfg.logger != nil { if cfg.logger != nil {
cfg.logger.Warn( cfg.logger.Warn(
"closing peer listener", "closing peer listener",
zap.String("address", cfg.ListenPeerUrls[i].String()), zap.String("address", cfg.LPUrls[i].String()),
zap.Error(err), zap.Error(err),
) )
} else { } else {
plog.Info("stopping listening for peers on ", cfg.ListenPeerUrls[i].String()) plog.Info("stopping listening for peers on ", cfg.LPUrls[i].String())
} }
ctx, cancel := context.WithTimeout(context.Background(), time.Second) ctx, cancel := context.WithTimeout(context.Background(), time.Second)
peers[i].close(ctx) peers[i].close(ctx)
@ -511,7 +498,7 @@ func configurePeerListeners(cfg *Config) (peers []*peerListener, err error) {
} }
}() }()
for i, u := range cfg.ListenPeerUrls { for i, u := range cfg.LPUrls {
if u.Scheme == "http" { if u.Scheme == "http" {
if !cfg.PeerTLSInfo.Empty() { if !cfg.PeerTLSInfo.Empty() {
if cfg.logger != nil { if cfg.logger != nil {
@ -613,9 +600,6 @@ func configureClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err erro
plog.Fatalf("could not get certs (%v)", err) plog.Fatalf("could not get certs (%v)", err)
} }
} }
updateMinMaxVersions(&cfg.ClientTLSInfo, cfg.TlsMinVersion, cfg.TlsMaxVersion)
if cfg.EnablePprof { if cfg.EnablePprof {
if cfg.logger != nil { if cfg.logger != nil {
cfg.logger.Info("pprof is enabled", zap.String("path", debugutil.HTTPPrefixPProf)) cfg.logger.Info("pprof is enabled", zap.String("path", debugutil.HTTPPrefixPProf))
@ -625,7 +609,8 @@ func configureClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err erro
} }
sctxs = make(map[string]*serveCtx) sctxs = make(map[string]*serveCtx)
for _, u := range append(cfg.ListenClientUrls, cfg.ListenClientHttpUrls...) { for _, u := range cfg.LCUrls {
sctx := newServeCtx(cfg.logger)
if u.Scheme == "http" || u.Scheme == "unix" { if u.Scheme == "http" || u.Scheme == "unix" {
if !cfg.ClientTLSInfo.Empty() { if !cfg.ClientTLSInfo.Empty() {
if cfg.logger != nil { if cfg.logger != nil {
@ -645,45 +630,29 @@ func configureClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err erro
if (u.Scheme == "https" || u.Scheme == "unixs") && cfg.ClientTLSInfo.Empty() { if (u.Scheme == "https" || u.Scheme == "unixs") && cfg.ClientTLSInfo.Empty() {
return nil, fmt.Errorf("TLS key/cert (--cert-file, --key-file) must be provided for client url %s with HTTPS scheme", u.String()) return nil, fmt.Errorf("TLS key/cert (--cert-file, --key-file) must be provided for client url %s with HTTPS scheme", u.String())
} }
}
for _, u := range cfg.ListenClientUrls { network := "tcp"
addr, secure, network := resolveUrl(u) addr := u.Host
sctx := sctxs[addr] if u.Scheme == "unix" || u.Scheme == "unixs" {
if sctx == nil { network = "unix"
sctx = newServeCtx(cfg.logger) addr = u.Host + u.Path
sctxs[addr] = sctx
} }
sctx.secure = sctx.secure || secure
sctx.insecure = sctx.insecure || !secure
sctx.scheme = u.Scheme
sctx.addr = addr
sctx.network = network sctx.network = network
}
for _, u := range cfg.ListenClientHttpUrls {
addr, secure, network := resolveUrl(u)
sctx := sctxs[addr] sctx.secure = u.Scheme == "https" || u.Scheme == "unixs"
if sctx == nil { sctx.insecure = !sctx.secure
sctx = newServeCtx(cfg.logger) if oldctx := sctxs[addr]; oldctx != nil {
sctxs[addr] = sctx oldctx.secure = oldctx.secure || sctx.secure
} else if !sctx.httpOnly { oldctx.insecure = oldctx.insecure || sctx.insecure
return nil, fmt.Errorf("cannot bind both --client-listen-urls and --client-listen-http-urls on the same url %s", u.String()) continue
}
sctx.secure = sctx.secure || secure
sctx.insecure = sctx.insecure || !secure
sctx.scheme = u.Scheme
sctx.addr = addr
sctx.network = network
sctx.httpOnly = true
} }
for _, sctx := range sctxs { if sctx.l, err = net.Listen(network, addr); err != nil {
if sctx.l, err = net.Listen(sctx.network, sctx.addr); err != nil {
return nil, err return nil, err
} }
// net.Listener will rewrite ipv4 0.0.0.0 to ipv6 [::], breaking // net.Listener will rewrite ipv4 0.0.0.0 to ipv6 [::], breaking
// hosts that disable ipv6. So, use the address given by the user. // hosts that disable ipv6. So, use the address given by the user.
sctx.addr = addr
if fdLimit, fderr := runtimeutil.FDLimit(); fderr == nil { if fdLimit, fderr := runtimeutil.FDLimit(); fderr == nil {
if fdLimit <= reservedInternalFDNum { if fdLimit <= reservedInternalFDNum {
@ -700,27 +669,27 @@ func configureClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err erro
sctx.l = transport.LimitListener(sctx.l, int(fdLimit-reservedInternalFDNum)) sctx.l = transport.LimitListener(sctx.l, int(fdLimit-reservedInternalFDNum))
} }
if sctx.network == "tcp" { if network == "tcp" {
if sctx.l, err = transport.NewKeepAliveListener(sctx.l, sctx.network, nil); err != nil { if sctx.l, err = transport.NewKeepAliveListener(sctx.l, network, nil); err != nil {
return nil, err return nil, err
} }
} }
defer func(sctx *serveCtx) { defer func() {
if err == nil || sctx.l == nil { if err == nil {
return return
} }
sctx.l.Close() sctx.l.Close()
if cfg.logger != nil { if cfg.logger != nil {
cfg.logger.Warn( cfg.logger.Warn(
"closing peer listener", "closing peer listener",
zap.String("address", sctx.addr), zap.String("address", u.Host),
zap.Error(err), zap.Error(err),
) )
} else { } else {
plog.Info("stopping listening for client requests on ", sctx.addr) plog.Info("stopping listening for client requests on ", u.Host)
} }
}(sctx) }()
for k := range cfg.UserHandlers { for k := range cfg.UserHandlers {
sctx.userHandlers[k] = cfg.UserHandlers[k] sctx.userHandlers[k] = cfg.UserHandlers[k]
} }
@ -731,21 +700,11 @@ func configureClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err erro
if cfg.Debug { if cfg.Debug {
sctx.registerTrace() sctx.registerTrace()
} }
sctxs[addr] = sctx
} }
return sctxs, nil return sctxs, nil
} }
func resolveUrl(u url.URL) (addr string, secure bool, network string) {
addr = u.Host
network = "tcp"
if u.Scheme == "unix" || u.Scheme == "unixs" {
addr = u.Host + u.Path
network = "unix"
}
secure = u.Scheme == "https" || u.Scheme == "unixs"
return addr, secure, network
}
func (e *Etcd) serveClients() (err error) { func (e *Etcd) serveClients() (err error) {
if !e.cfg.ClientTLSInfo.Empty() { if !e.cfg.ClientTLSInfo.Empty() {
if e.cfg.logger != nil { if e.cfg.logger != nil {
@ -789,69 +748,15 @@ func (e *Etcd) serveClients() (err error) {
})) }))
} }
splitHttp := false
for _, sctx := range e.sctxs {
if sctx.httpOnly {
splitHttp = true
}
}
// start client servers in each goroutine // start client servers in each goroutine
for _, sctx := range e.sctxs { for _, sctx := range e.sctxs {
go func(s *serveCtx) { go func(s *serveCtx) {
e.errHandler(s.serve(e.Server, &e.cfg.ClientTLSInfo, h, e.errHandler, e.grpcGatewayDial(splitHttp), splitHttp, gopts...)) e.errHandler(s.serve(e.Server, &e.cfg.ClientTLSInfo, h, e.errHandler, gopts...))
}(sctx) }(sctx)
} }
return nil return nil
} }
func (e *Etcd) grpcGatewayDial(splitHttp bool) (grpcDial func(ctx context.Context) (*grpc.ClientConn, error)) {
if !e.cfg.EnableGRPCGateway {
return nil
}
sctx := e.pickGrpcGatewayServeContext(splitHttp)
addr := sctx.addr
if network := sctx.network; network == "unix" {
// explicitly define unix network for gRPC socket support
addr = fmt.Sprintf("%s://%s", network, addr)
}
opts := []grpc.DialOption{grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(math.MaxInt32))}
if sctx.secure {
tlscfg, tlsErr := e.cfg.ClientTLSInfo.ServerConfig()
if tlsErr != nil {
return func(ctx context.Context) (*grpc.ClientConn, error) {
return nil, tlsErr
}
}
dtls := tlscfg.Clone()
// trust local server
dtls.InsecureSkipVerify = true
bundle := credentials.NewBundle(credentials.Config{TLSConfig: dtls})
opts = append(opts, grpc.WithTransportCredentials(bundle.TransportCredentials()))
} else {
opts = append(opts, grpc.WithInsecure())
}
return func(ctx context.Context) (*grpc.ClientConn, error) {
conn, err := grpc.DialContext(ctx, addr, opts...)
if err != nil {
sctx.lg.Error("grpc gateway failed to dial", zap.String("addr", addr), zap.Error(err))
return nil, err
}
return conn, err
}
}
func (e *Etcd) pickGrpcGatewayServeContext(splitHttp bool) *serveCtx {
for _, sctx := range e.sctxs {
if !splitHttp || !sctx.httpOnly {
return sctx
}
}
panic("Expect at least one context able to serve grpc")
}
func (e *Etcd) serveMetrics() (err error) { func (e *Etcd) serveMetrics() (err error) {
if e.cfg.Metrics == "extensive" { if e.cfg.Metrics == "extensive" {
grpc_prometheus.EnableHandlingTimeHistogram() grpc_prometheus.EnableHandlingTimeHistogram()

View File

@ -23,6 +23,7 @@ import (
"net/http" "net/http"
"strings" "strings"
"go.etcd.io/etcd/clientv3/credentials"
"go.etcd.io/etcd/etcdserver" "go.etcd.io/etcd/etcdserver"
"go.etcd.io/etcd/etcdserver/api/v3client" "go.etcd.io/etcd/etcdserver/api/v3client"
"go.etcd.io/etcd/etcdserver/api/v3election" "go.etcd.io/etcd/etcdserver/api/v3election"
@ -41,7 +42,6 @@ import (
"github.com/soheilhy/cmux" "github.com/soheilhy/cmux"
"github.com/tmc/grpc-websocket-proxy/wsproxy" "github.com/tmc/grpc-websocket-proxy/wsproxy"
"go.uber.org/zap" "go.uber.org/zap"
"golang.org/x/net/http2"
"golang.org/x/net/trace" "golang.org/x/net/trace"
"google.golang.org/grpc" "google.golang.org/grpc"
) )
@ -49,13 +49,10 @@ import (
type serveCtx struct { type serveCtx struct {
lg *zap.Logger lg *zap.Logger
l net.Listener l net.Listener
scheme string
addr string addr string
network string network string
secure bool secure bool
insecure bool insecure bool
httpOnly bool
ctx context.Context ctx context.Context
cancel context.CancelFunc cancel context.CancelFunc
@ -90,8 +87,6 @@ func (sctx *serveCtx) serve(
tlsinfo *transport.TLSInfo, tlsinfo *transport.TLSInfo,
handler http.Handler, handler http.Handler,
errHandler func(error), errHandler func(error),
grpcDialForRestGatewayBackends func(ctx context.Context) (*grpc.ClientConn, error),
splitHttp bool,
gopts ...grpc.ServerOption) (err error) { gopts ...grpc.ServerOption) (err error) {
logger := defaultLog.New(ioutil.Discard, "etcdhttp", 0) logger := defaultLog.New(ioutil.Discard, "etcdhttp", 0)
<-s.ReadyNotify() <-s.ReadyNotify()
@ -101,103 +96,48 @@ func (sctx *serveCtx) serve(
} }
m := cmux.New(sctx.l) m := cmux.New(sctx.l)
var server func() error
onlyGRPC := splitHttp && !sctx.httpOnly
onlyHttp := splitHttp && sctx.httpOnly
grpcEnabled := !onlyHttp
httpEnabled := !onlyGRPC
v3c := v3client.New(s) v3c := v3client.New(s)
servElection := v3election.NewElectionServer(v3c) servElection := v3election.NewElectionServer(v3c)
servLock := v3lock.NewLockServer(v3c) servLock := v3lock.NewLockServer(v3c)
// Make sure serversC is closed even if we prematurely exit the function. var gs *grpc.Server
defer close(sctx.serversC) defer func() {
var gwmux *gw.ServeMux if err != nil && gs != nil {
if s.Cfg.EnableGRPCGateway { gs.Stop()
// GRPC gateway connects to grpc server via connection provided by grpc dial.
gwmux, err = sctx.registerGateway(grpcDialForRestGatewayBackends)
if err != nil {
sctx.lg.Error("registerGateway failed", zap.Error(err))
return err
}
}
var traffic string
switch {
case onlyGRPC:
traffic = "grpc"
case onlyHttp:
traffic = "http"
default:
traffic = "grpc+http"
} }
}()
if sctx.insecure { if sctx.insecure {
var gs *grpc.Server
var srv *http.Server
if httpEnabled {
httpmux := sctx.createMux(gwmux, handler)
srv = &http.Server{
Handler: createAccessController(sctx.lg, s, httpmux),
ErrorLog: logger, // do not log user error
}
if err := configureHttpServer(srv, s.Cfg); err != nil {
sctx.lg.Error("Configure http server failed", zap.Error(err))
return err
}
}
if grpcEnabled {
gs = v3rpc.Server(s, nil, gopts...) gs = v3rpc.Server(s, nil, gopts...)
v3electionpb.RegisterElectionServer(gs, servElection) v3electionpb.RegisterElectionServer(gs, servElection)
v3lockpb.RegisterLockServer(gs, servLock) v3lockpb.RegisterLockServer(gs, servLock)
if sctx.serviceRegister != nil { if sctx.serviceRegister != nil {
sctx.serviceRegister(gs) sctx.serviceRegister(gs)
} }
defer func(gs *grpc.Server) {
if err == nil {
return
}
if sctx.lg != nil {
sctx.lg.Warn("stopping insecure grpc server due to error", zap.Error(err))
} else {
plog.Warningf("stopping insecure grpc server due to error: %s", err)
}
gs.Stop()
if sctx.lg != nil {
sctx.lg.Warn("stopped insecure grpc server due to error", zap.Error(err))
} else {
plog.Warningf("stopped insecure grpc server due to error: %s", err)
}
}(gs)
}
if onlyGRPC {
server = func() error {
return gs.Serve(sctx.l)
}
} else {
server = m.Serve
httpl := m.Match(cmux.HTTP1())
go func(srvhttp *http.Server, tlsLis net.Listener) {
errHandler(srvhttp.Serve(tlsLis))
}(srv, httpl)
if grpcEnabled {
grpcl := m.Match(cmux.HTTP2()) grpcl := m.Match(cmux.HTTP2())
go func(gs *grpc.Server, l net.Listener) { go func() { errHandler(gs.Serve(grpcl)) }()
errHandler(gs.Serve(l))
}(gs, grpcl) var gwmux *gw.ServeMux
if s.Cfg.EnableGRPCGateway {
gwmux, err = sctx.registerGateway([]grpc.DialOption{grpc.WithInsecure()})
if err != nil {
return err
} }
} }
sctx.serversC <- &servers{grpc: gs, http: srv} httpmux := sctx.createMux(gwmux, handler)
srvhttp := &http.Server{
Handler: createAccessController(sctx.lg, s, httpmux),
ErrorLog: logger, // do not log user error
}
httpl := m.Match(cmux.HTTP1())
go func() { errHandler(srvhttp.Serve(httpl)) }()
sctx.serversC <- &servers{grpc: gs, http: srvhttp}
if sctx.lg != nil { if sctx.lg != nil {
sctx.lg.Info( sctx.lg.Info(
"serving client traffic insecurely; this is strongly discouraged!", "serving client traffic insecurely; this is strongly discouraged!",
zap.String("traffic", traffic),
zap.String("address", sctx.l.Addr().String()), zap.String("address", sctx.l.Addr().String()),
) )
} else { } else {
@ -206,77 +146,50 @@ func (sctx *serveCtx) serve(
} }
if sctx.secure { if sctx.secure {
var gs *grpc.Server
var srv *http.Server
tlscfg, tlsErr := tlsinfo.ServerConfig() tlscfg, tlsErr := tlsinfo.ServerConfig()
if tlsErr != nil { if tlsErr != nil {
return tlsErr return tlsErr
} }
if grpcEnabled {
gs = v3rpc.Server(s, tlscfg, gopts...) gs = v3rpc.Server(s, tlscfg, gopts...)
v3electionpb.RegisterElectionServer(gs, servElection) v3electionpb.RegisterElectionServer(gs, servElection)
v3lockpb.RegisterLockServer(gs, servLock) v3lockpb.RegisterLockServer(gs, servLock)
if sctx.serviceRegister != nil { if sctx.serviceRegister != nil {
sctx.serviceRegister(gs) sctx.serviceRegister(gs)
} }
defer func(gs *grpc.Server) {
if err == nil {
return
}
if sctx.lg != nil {
sctx.lg.Warn("stopping secure grpc server due to error", zap.Error(err))
} else {
plog.Warningf("stopping secure grpc server due to error: %s", err)
}
gs.Stop()
if sctx.lg != nil {
sctx.lg.Warn("stopped secure grpc server due to error", zap.Error(err))
} else {
plog.Warningf("stopped secure grpc server due to error: %s", err)
}
}(gs)
}
if httpEnabled {
if grpcEnabled {
handler = grpcHandlerFunc(gs, handler) handler = grpcHandlerFunc(gs, handler)
var gwmux *gw.ServeMux
if s.Cfg.EnableGRPCGateway {
dtls := tlscfg.Clone()
// trust local server
dtls.InsecureSkipVerify = true
bundle := credentials.NewBundle(credentials.Config{TLSConfig: dtls})
opts := []grpc.DialOption{grpc.WithTransportCredentials(bundle.TransportCredentials())}
gwmux, err = sctx.registerGateway(opts)
if err != nil {
return err
} }
}
var tlsl net.Listener
tlsl, err = transport.NewTLSListener(m.Match(cmux.Any()), tlsinfo)
if err != nil {
return err
}
// TODO: add debug flag; enable logging when debug flag is set
httpmux := sctx.createMux(gwmux, handler) httpmux := sctx.createMux(gwmux, handler)
srv = &http.Server{ srv := &http.Server{
Handler: createAccessController(sctx.lg, s, httpmux), Handler: createAccessController(sctx.lg, s, httpmux),
TLSConfig: tlscfg, TLSConfig: tlscfg,
ErrorLog: logger, // do not log user error ErrorLog: logger, // do not log user error
} }
if err := configureHttpServer(srv, s.Cfg); err != nil { go func() { errHandler(srv.Serve(tlsl)) }()
sctx.lg.Error("Configure https server failed", zap.Error(err))
return err
}
}
if onlyGRPC {
server = func() error { return gs.Serve(sctx.l) }
} else {
server = m.Serve
tlsl, err := transport.NewTLSListener(m.Match(cmux.Any()), tlsinfo)
if err != nil {
return err
}
go func(srvhttp *http.Server, tlsl net.Listener) {
errHandler(srvhttp.Serve(tlsl))
}(srv, tlsl)
}
sctx.serversC <- &servers{secure: true, grpc: gs, http: srv} sctx.serversC <- &servers{secure: true, grpc: gs, http: srv}
if sctx.lg != nil { if sctx.lg != nil {
sctx.lg.Info( sctx.lg.Info(
"serving client traffic securely", "serving client traffic securely",
zap.String("traffic", traffic),
zap.String("address", sctx.l.Addr().String()), zap.String("address", sctx.l.Addr().String()),
) )
} else { } else {
@ -284,16 +197,8 @@ func (sctx *serveCtx) serve(
} }
} }
return server() close(sctx.serversC)
} return m.Serve()
func configureHttpServer(srv *http.Server, cfg etcdserver.ServerConfig) error {
// todo (ahrtr): should we support configuring other parameters in the future as well?
return http2.ConfigureServer(srv, &http2.Server{
MaxConcurrentStreams: cfg.MaxConcurrentStreams,
// Override to avoid using priority scheduler which is affected by https://github.com/golang/go/issues/58804.
NewWriteScheduler: http2.NewRandomWriteScheduler,
})
} }
// grpcHandlerFunc returns an http.Handler that delegates to grpcServer on incoming gRPC // grpcHandlerFunc returns an http.Handler that delegates to grpcServer on incoming gRPC
@ -315,10 +220,16 @@ func grpcHandlerFunc(grpcServer *grpc.Server, otherHandler http.Handler) http.Ha
type registerHandlerFunc func(context.Context, *gw.ServeMux, *grpc.ClientConn) error type registerHandlerFunc func(context.Context, *gw.ServeMux, *grpc.ClientConn) error
func (sctx *serveCtx) registerGateway(dial func(ctx context.Context) (*grpc.ClientConn, error)) (*gw.ServeMux, error) { func (sctx *serveCtx) registerGateway(opts []grpc.DialOption) (*gw.ServeMux, error) {
ctx := sctx.ctx ctx := sctx.ctx
conn, err := dial(ctx) addr := sctx.addr
if network := sctx.network; network == "unix" {
// explicitly define unix network for gRPC socket support
addr = fmt.Sprintf("%s://%s", network, addr)
}
conn, err := grpc.DialContext(ctx, addr, opts...)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -357,18 +268,6 @@ func (sctx *serveCtx) registerGateway(dial func(ctx context.Context) (*grpc.Clie
return gwmux, nil return gwmux, nil
} }
type wsProxyZapLogger struct {
*zap.Logger
}
func (w wsProxyZapLogger) Warnln(i ...interface{}) {
w.Warn(fmt.Sprint(i...))
}
func (w wsProxyZapLogger) Debugln(i ...interface{}) {
w.Debug(fmt.Sprint(i...))
}
func (sctx *serveCtx) createMux(gwmux *gw.ServeMux, handler http.Handler) *http.ServeMux { func (sctx *serveCtx) createMux(gwmux *gw.ServeMux, handler http.Handler) *http.ServeMux {
httpmux := http.NewServeMux() httpmux := http.NewServeMux()
for path, h := range sctx.userHandlers { for path, h := range sctx.userHandlers {
@ -388,7 +287,6 @@ func (sctx *serveCtx) createMux(gwmux *gw.ServeMux, handler http.Handler) *http.
}, },
), ),
wsproxy.WithMaxRespBodyBufferSize(0x7fffffff), wsproxy.WithMaxRespBodyBufferSize(0x7fffffff),
wsproxy.WithLogger(wsProxyZapLogger{sctx.lg}),
), ),
) )
} }

View File

@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
//go:build cov
// +build cov // +build cov
package ctlv2 package ctlv2

View File

@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
//go:build !cov
// +build !cov // +build !cov
package ctlv2 package ctlv2

View File

@ -311,8 +311,6 @@ func newCheckDatascaleCommand(cmd *cobra.Command, args []string) {
ExitWithError(ExitError, errEndpoints) ExitWithError(ExitError, errEndpoints)
} }
sec := secureCfgFromCmd(cmd)
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
resp, err := clients[0].Get(ctx, checkDatascalePrefix, v3.WithPrefix(), v3.WithLimit(1)) resp, err := clients[0].Get(ctx, checkDatascalePrefix, v3.WithPrefix(), v3.WithLimit(1))
cancel() cancel()
@ -331,7 +329,7 @@ func newCheckDatascaleCommand(cmd *cobra.Command, args []string) {
wg.Add(len(clients)) wg.Add(len(clients))
// get the process_resident_memory_bytes and process_virtual_memory_bytes before the put operations // get the process_resident_memory_bytes and process_virtual_memory_bytes before the put operations
bytesBefore := endpointMemoryMetrics(eps[0], sec) bytesBefore := endpointMemoryMetrics(eps[0])
if bytesBefore == 0 { if bytesBefore == 0 {
fmt.Println("FAIL: Could not read process_resident_memory_bytes before the put operations.") fmt.Println("FAIL: Could not read process_resident_memory_bytes before the put operations.")
os.Exit(ExitError) os.Exit(ExitError)
@ -369,7 +367,7 @@ func newCheckDatascaleCommand(cmd *cobra.Command, args []string) {
s := <-sc s := <-sc
// get the process_resident_memory_bytes after the put operations // get the process_resident_memory_bytes after the put operations
bytesAfter := endpointMemoryMetrics(eps[0], sec) bytesAfter := endpointMemoryMetrics(eps[0])
if bytesAfter == 0 { if bytesAfter == 0 {
fmt.Println("FAIL: Could not read process_resident_memory_bytes after the put operations.") fmt.Println("FAIL: Could not read process_resident_memory_bytes after the put operations.")
os.Exit(ExitError) os.Exit(ExitError)

View File

@ -31,7 +31,6 @@ var (
getFromKey bool getFromKey bool
getRev int64 getRev int64
getKeysOnly bool getKeysOnly bool
getCountOnly bool
printValueOnly bool printValueOnly bool
) )
@ -51,7 +50,6 @@ func NewGetCommand() *cobra.Command {
cmd.Flags().BoolVar(&getFromKey, "from-key", false, "Get keys that are greater than or equal to the given key using byte compare") cmd.Flags().BoolVar(&getFromKey, "from-key", false, "Get keys that are greater than or equal to the given key using byte compare")
cmd.Flags().Int64Var(&getRev, "rev", 0, "Specify the kv revision") cmd.Flags().Int64Var(&getRev, "rev", 0, "Specify the kv revision")
cmd.Flags().BoolVar(&getKeysOnly, "keys-only", false, "Get only the keys") cmd.Flags().BoolVar(&getKeysOnly, "keys-only", false, "Get only the keys")
cmd.Flags().BoolVar(&getCountOnly, "count-only", false, "Get only the count")
cmd.Flags().BoolVar(&printValueOnly, "print-value-only", false, `Only write values when using the "simple" output format`) cmd.Flags().BoolVar(&printValueOnly, "print-value-only", false, `Only write values when using the "simple" output format`)
return cmd return cmd
} }
@ -66,12 +64,6 @@ func getCommandFunc(cmd *cobra.Command, args []string) {
ExitWithError(ExitError, err) ExitWithError(ExitError, err)
} }
if getCountOnly {
if _, fields := display.(*fieldsPrinter); !fields {
ExitWithError(ExitBadArgs, fmt.Errorf("--count-only is only for `--write-out=fields`"))
}
}
if printValueOnly { if printValueOnly {
dp, simple := (display).(*simplePrinter) dp, simple := (display).(*simplePrinter)
if !simple { if !simple {
@ -91,10 +83,6 @@ func getGetOp(args []string) (string, []clientv3.OpOption) {
ExitWithError(ExitBadArgs, fmt.Errorf("`--prefix` and `--from-key` cannot be set at the same time, choose one")) ExitWithError(ExitBadArgs, fmt.Errorf("`--prefix` and `--from-key` cannot be set at the same time, choose one"))
} }
if getKeysOnly && getCountOnly {
ExitWithError(ExitBadArgs, fmt.Errorf("`--keys-only` and `--count-only` cannot be set at the same time, choose one"))
}
opts := []clientv3.OpOption{} opts := []clientv3.OpOption{}
switch getConsistency { switch getConsistency {
case "s": case "s":
@ -171,9 +159,5 @@ func getGetOp(args []string) (string, []clientv3.OpOption) {
opts = append(opts, clientv3.WithKeysOnly()) opts = append(opts, clientv3.WithKeysOnly())
} }
if getCountOnly {
opts = append(opts, clientv3.WithCountOnly())
}
return key, opts return key, opts
} }

View File

@ -42,8 +42,7 @@ func transferLeadershipCommandFunc(cmd *cobra.Command, args []string) {
ExitWithError(ExitBadArgs, err) ExitWithError(ExitBadArgs, err)
} }
cfg := clientConfigFromCmd(cmd) c := mustClientFromCmd(cmd)
c := cfg.mustClient()
eps := c.Endpoints() eps := c.Endpoints()
c.Close() c.Close()
@ -53,6 +52,7 @@ func transferLeadershipCommandFunc(cmd *cobra.Command, args []string) {
var leaderCli *clientv3.Client var leaderCli *clientv3.Client
var leaderID uint64 var leaderID uint64
for _, ep := range eps { for _, ep := range eps {
cfg := clientConfigFromCmd(cmd)
cfg.endpoints = []string{ep} cfg.endpoints = []string{ep}
cli := cfg.mustClient() cli := cfg.mustClient()
resp, serr := cli.Status(ctx, ep) resp, serr := cli.Status(ctx, ep)

View File

@ -16,7 +16,6 @@ package command
import ( import (
"context" "context"
"crypto/tls"
"encoding/hex" "encoding/hex"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
@ -91,26 +90,14 @@ func isCommandTimeoutFlagSet(cmd *cobra.Command) bool {
return commandTimeoutFlag.Changed return commandTimeoutFlag.Changed
} }
// get the process_resident_memory_bytes from <server>/metrics // get the process_resident_memory_bytes from <server:2379>/metrics
func endpointMemoryMetrics(host string, scfg *secureCfg) float64 { func endpointMemoryMetrics(host string) float64 {
residentMemoryKey := "process_resident_memory_bytes" residentMemoryKey := "process_resident_memory_bytes"
var residentMemoryValue string var residentMemoryValue string
if !strings.HasPrefix(host, "http://") && !strings.HasPrefix(host, "https://") { if !strings.HasPrefix(host, `http://`) {
host = "http://" + host host = "http://" + host
} }
url := host + "/metrics" url := host + "/metrics"
if strings.HasPrefix(host, "https://") {
// load client certificate
cert, err := tls.LoadX509KeyPair(scfg.cert, scfg.key)
if err != nil {
fmt.Println(fmt.Sprintf("client certificate error: %v", err))
return 0.0
}
http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{
Certificates: []tls.Certificate{cert},
InsecureSkipVerify: scfg.insecureSkipVerify,
}
}
resp, err := http.Get(url) resp, err := http.Get(url)
if err != nil { if err != nil {
fmt.Println(fmt.Sprintf("fetch error: %v", err)) fmt.Println(fmt.Sprintf("fetch error: %v", err))

View File

@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
//go:build cov
// +build cov // +build cov
package ctlv3 package ctlv3

View File

@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
//go:build !cov
// +build !cov // +build !cov
package ctlv3 package ctlv3

View File

@ -29,7 +29,6 @@ import (
"go.etcd.io/etcd/embed" "go.etcd.io/etcd/embed"
"go.etcd.io/etcd/pkg/flags" "go.etcd.io/etcd/pkg/flags"
"go.etcd.io/etcd/pkg/logutil" "go.etcd.io/etcd/pkg/logutil"
"go.etcd.io/etcd/pkg/tlsutil"
"go.etcd.io/etcd/pkg/types" "go.etcd.io/etcd/pkg/types"
"go.etcd.io/etcd/version" "go.etcd.io/etcd/version"
@ -141,11 +140,7 @@ func newConfig() *config {
) )
fs.Var( fs.Var(
flags.NewUniqueURLsWithExceptions(embed.DefaultListenClientURLs, ""), "listen-client-urls", flags.NewUniqueURLsWithExceptions(embed.DefaultListenClientURLs, ""), "listen-client-urls",
"List of URLs to listen on for client grpc traffic and http as long as --listen-client-http-urls is not specified.", "List of URLs to listen on for client traffic.",
)
fs.Var(
flags.NewUniqueURLsWithExceptions("", ""), "listen-client-http-urls",
"List of URLs to listen on for http only client traffic. Enabling this flag removes http services from --listen-client-urls.",
) )
fs.Var( fs.Var(
flags.NewUniqueURLsWithExceptions("", ""), flags.NewUniqueURLsWithExceptions("", ""),
@ -168,8 +163,6 @@ func newConfig() *config {
fs.DurationVar(&cfg.ec.GRPCKeepAliveInterval, "grpc-keepalive-interval", cfg.ec.GRPCKeepAliveInterval, "Frequency duration of server-to-client ping to check if a connection is alive (0 to disable).") fs.DurationVar(&cfg.ec.GRPCKeepAliveInterval, "grpc-keepalive-interval", cfg.ec.GRPCKeepAliveInterval, "Frequency duration of server-to-client ping to check if a connection is alive (0 to disable).")
fs.DurationVar(&cfg.ec.GRPCKeepAliveTimeout, "grpc-keepalive-timeout", cfg.ec.GRPCKeepAliveTimeout, "Additional duration of wait before closing a non-responsive connection (0 to disable).") fs.DurationVar(&cfg.ec.GRPCKeepAliveTimeout, "grpc-keepalive-timeout", cfg.ec.GRPCKeepAliveTimeout, "Additional duration of wait before closing a non-responsive connection (0 to disable).")
fs.Var(flags.NewUint32Value(cfg.ec.MaxConcurrentStreams), "max-concurrent-streams", "Maximum concurrent streams that each client can open at a time.")
// clustering // clustering
fs.Var( fs.Var(
flags.NewUniqueURLsWithExceptions(embed.DefaultInitialAdvertisePeerURLs, ""), flags.NewUniqueURLsWithExceptions(embed.DefaultInitialAdvertisePeerURLs, ""),
@ -189,7 +182,7 @@ func newConfig() *config {
fs.StringVar(&cfg.ec.DNSClusterServiceName, "discovery-srv-name", cfg.ec.DNSClusterServiceName, "Service name to query when using DNS discovery.") fs.StringVar(&cfg.ec.DNSClusterServiceName, "discovery-srv-name", cfg.ec.DNSClusterServiceName, "Service name to query when using DNS discovery.")
fs.StringVar(&cfg.ec.InitialCluster, "initial-cluster", cfg.ec.InitialCluster, "Initial cluster configuration for bootstrapping.") fs.StringVar(&cfg.ec.InitialCluster, "initial-cluster", cfg.ec.InitialCluster, "Initial cluster configuration for bootstrapping.")
fs.StringVar(&cfg.ec.InitialClusterToken, "initial-cluster-token", cfg.ec.InitialClusterToken, "Initial cluster token for the etcd cluster during bootstrap.") fs.StringVar(&cfg.ec.InitialClusterToken, "initial-cluster-token", cfg.ec.InitialClusterToken, "Initial cluster token for the etcd cluster during bootstrap.")
fs.Var(cfg.cf.clusterState, "initial-cluster-state", "Initial cluster state ('new' when bootstrapping a new cluster or 'existing' when adding new members to an existing cluster). After successful initialization (bootstrapping or adding), flag is ignored on restarts.") fs.Var(cfg.cf.clusterState, "initial-cluster-state", "Initial cluster state ('new' or 'existing').")
fs.BoolVar(&cfg.ec.StrictReconfigCheck, "strict-reconfig-check", cfg.ec.StrictReconfigCheck, "Reject reconfiguration requests that would cause quorum loss.") fs.BoolVar(&cfg.ec.StrictReconfigCheck, "strict-reconfig-check", cfg.ec.StrictReconfigCheck, "Reject reconfiguration requests that would cause quorum loss.")
fs.BoolVar(&cfg.ec.EnableV2, "enable-v2", cfg.ec.EnableV2, "Accept etcd V2 client requests.") fs.BoolVar(&cfg.ec.EnableV2, "enable-v2", cfg.ec.EnableV2, "Accept etcd V2 client requests.")
@ -221,8 +214,6 @@ func newConfig() *config {
fs.StringVar(&cfg.ec.PeerTLSInfo.AllowedHostname, "peer-cert-allowed-hostname", "", "Allowed TLS hostname for inter peer authentication.") fs.StringVar(&cfg.ec.PeerTLSInfo.AllowedHostname, "peer-cert-allowed-hostname", "", "Allowed TLS hostname for inter peer authentication.")
fs.Var(flags.NewStringsValue(""), "cipher-suites", "Comma-separated list of supported TLS cipher suites between client/server and peers (empty will be auto-populated by Go).") fs.Var(flags.NewStringsValue(""), "cipher-suites", "Comma-separated list of supported TLS cipher suites between client/server and peers (empty will be auto-populated by Go).")
fs.BoolVar(&cfg.ec.PeerTLSInfo.SkipClientSANVerify, "experimental-peer-skip-client-san-verification", false, "Skip verification of SAN field in client certificate for peer connections.") fs.BoolVar(&cfg.ec.PeerTLSInfo.SkipClientSANVerify, "experimental-peer-skip-client-san-verification", false, "Skip verification of SAN field in client certificate for peer connections.")
fs.StringVar(&cfg.ec.TlsMinVersion, "tls-min-version", string(tlsutil.TLSVersion12), "Minimum TLS version supported by etcd. Possible values: TLS1.2, TLS1.3.")
fs.StringVar(&cfg.ec.TlsMaxVersion, "tls-max-version", string(tlsutil.TLSVersionDefault), "Maximum TLS version supported by etcd. Possible values: TLS1.2, TLS1.3 (empty defers to Go).")
fs.Var( fs.Var(
flags.NewUniqueURLsWithExceptions("*", "*"), flags.NewUniqueURLsWithExceptions("*", "*"),
@ -264,12 +255,9 @@ func newConfig() *config {
fs.DurationVar(&cfg.ec.ExperimentalCorruptCheckTime, "experimental-corrupt-check-time", cfg.ec.ExperimentalCorruptCheckTime, "Duration of time between cluster corruption check passes.") fs.DurationVar(&cfg.ec.ExperimentalCorruptCheckTime, "experimental-corrupt-check-time", cfg.ec.ExperimentalCorruptCheckTime, "Duration of time between cluster corruption check passes.")
fs.StringVar(&cfg.ec.ExperimentalEnableV2V3, "experimental-enable-v2v3", cfg.ec.ExperimentalEnableV2V3, "v3 prefix for serving emulated v2 state.") fs.StringVar(&cfg.ec.ExperimentalEnableV2V3, "experimental-enable-v2v3", cfg.ec.ExperimentalEnableV2V3, "v3 prefix for serving emulated v2 state.")
fs.StringVar(&cfg.ec.ExperimentalBackendFreelistType, "experimental-backend-bbolt-freelist-type", cfg.ec.ExperimentalBackendFreelistType, "ExperimentalBackendFreelistType specifies the type of freelist that boltdb backend uses(array and map are supported types)") fs.StringVar(&cfg.ec.ExperimentalBackendFreelistType, "experimental-backend-bbolt-freelist-type", cfg.ec.ExperimentalBackendFreelistType, "ExperimentalBackendFreelistType specifies the type of freelist that boltdb backend uses(array and map are supported types)")
fs.BoolVar(&cfg.ec.ExperimentalEnableLeaseCheckpoint, "experimental-enable-lease-checkpoint", false, "Enable leader to send regular checkpoints to other members to prevent reset of remaining TTL on leader change.") fs.BoolVar(&cfg.ec.ExperimentalEnableLeaseCheckpoint, "experimental-enable-lease-checkpoint", false, "Enable to persist lease remaining TTL to prevent indefinite auto-renewal of long lived leases.")
// TODO: delete in v3.7
fs.BoolVar(&cfg.ec.ExperimentalEnableLeaseCheckpointPersist, "experimental-enable-lease-checkpoint-persist", false, "Enable persisting remainingTTL to prevent indefinite auto-renewal of long lived leases. Always enabled in v3.6. Should be used to ensure smooth upgrade from v3.5 clusters with this feature enabled. Requires experimental-enable-lease-checkpoint to be enabled.")
fs.IntVar(&cfg.ec.ExperimentalCompactionBatchLimit, "experimental-compaction-batch-limit", cfg.ec.ExperimentalCompactionBatchLimit, "Sets the maximum revisions deleted in each compaction batch.") fs.IntVar(&cfg.ec.ExperimentalCompactionBatchLimit, "experimental-compaction-batch-limit", cfg.ec.ExperimentalCompactionBatchLimit, "Sets the maximum revisions deleted in each compaction batch.")
fs.DurationVar(&cfg.ec.ExperimentalWatchProgressNotifyInterval, "experimental-watch-progress-notify-interval", cfg.ec.ExperimentalWatchProgressNotifyInterval, "Duration of periodic watch progress notifications.") fs.DurationVar(&cfg.ec.ExperimentalWatchProgressNotifyInterval, "experimental-watch-progress-notify-interval", cfg.ec.ExperimentalWatchProgressNotifyInterval, "Duration of periodic watch progress notifications.")
fs.DurationVar(&cfg.ec.ExperimentalWarningApplyDuration, "experimental-warning-apply-duration", cfg.ec.ExperimentalWarningApplyDuration, "Time duration after which a warning is generated if request takes more time.")
// unsafe // unsafe
fs.BoolVar(&cfg.ec.UnsafeNoFsync, "unsafe-no-fsync", false, "Disables fsync, unsafe, will cause data loss.") fs.BoolVar(&cfg.ec.UnsafeNoFsync, "unsafe-no-fsync", false, "Disables fsync, unsafe, will cause data loss.")
@ -336,11 +324,10 @@ func (cfg *config) configFromCmdLine() error {
return err return err
} }
cfg.ec.ListenPeerUrls = flags.UniqueURLsFromFlag(cfg.cf.flagSet, "listen-peer-urls") cfg.ec.LPUrls = flags.UniqueURLsFromFlag(cfg.cf.flagSet, "listen-peer-urls")
cfg.ec.AdvertisePeerUrls = flags.UniqueURLsFromFlag(cfg.cf.flagSet, "initial-advertise-peer-urls") cfg.ec.APUrls = flags.UniqueURLsFromFlag(cfg.cf.flagSet, "initial-advertise-peer-urls")
cfg.ec.ListenClientUrls = flags.UniqueURLsFromFlag(cfg.cf.flagSet, "listen-client-urls") cfg.ec.LCUrls = flags.UniqueURLsFromFlag(cfg.cf.flagSet, "listen-client-urls")
cfg.ec.ListenClientHttpUrls = flags.UniqueURLsFromFlag(cfg.cf.flagSet, "listen-client-http-urls") cfg.ec.ACUrls = flags.UniqueURLsFromFlag(cfg.cf.flagSet, "advertise-client-urls")
cfg.ec.AdvertiseClientUrls = flags.UniqueURLsFromFlag(cfg.cf.flagSet, "advertise-client-urls")
cfg.ec.ListenMetricsUrls = flags.UniqueURLsFromFlag(cfg.cf.flagSet, "listen-metrics-urls") cfg.ec.ListenMetricsUrls = flags.UniqueURLsFromFlag(cfg.cf.flagSet, "listen-metrics-urls")
cfg.ec.CORS = flags.UniqueURLsMapFromFlag(cfg.cf.flagSet, "cors") cfg.ec.CORS = flags.UniqueURLsMapFromFlag(cfg.cf.flagSet, "cors")
@ -348,8 +335,6 @@ func (cfg *config) configFromCmdLine() error {
cfg.ec.CipherSuites = flags.StringsFromFlag(cfg.cf.flagSet, "cipher-suites") cfg.ec.CipherSuites = flags.StringsFromFlag(cfg.cf.flagSet, "cipher-suites")
cfg.ec.MaxConcurrentStreams = flags.Uint32FromFlag(cfg.cf.flagSet, "max-concurrent-streams")
// TODO: remove this in v3.5 // TODO: remove this in v3.5
cfg.ec.DeprecatedLogOutput = flags.UniqueStringsFromFlag(cfg.cf.flagSet, "log-output") cfg.ec.DeprecatedLogOutput = flags.UniqueStringsFromFlag(cfg.cf.flagSet, "log-output")
cfg.ec.LogOutputs = flags.UniqueStringsFromFlag(cfg.cf.flagSet, "log-outputs") cfg.ec.LogOutputs = flags.UniqueStringsFromFlag(cfg.cf.flagSet, "log-outputs")
@ -361,7 +346,7 @@ func (cfg *config) configFromCmdLine() error {
// disable default advertise-client-urls if lcurls is set // disable default advertise-client-urls if lcurls is set
missingAC := flags.IsSet(cfg.cf.flagSet, "listen-client-urls") && !flags.IsSet(cfg.cf.flagSet, "advertise-client-urls") missingAC := flags.IsSet(cfg.cf.flagSet, "listen-client-urls") && !flags.IsSet(cfg.cf.flagSet, "advertise-client-urls")
if !cfg.mayBeProxy() && missingAC { if !cfg.mayBeProxy() && missingAC {
cfg.ec.AdvertiseClientUrls = nil cfg.ec.ACUrls = nil
} }
// disable default initial-cluster if discovery is set // disable default initial-cluster if discovery is set

View File

@ -36,7 +36,6 @@ func TestConfigParsingMemberFlags(t *testing.T) {
"-snapshot-count=10", "-snapshot-count=10",
"-listen-peer-urls=http://localhost:8000,https://localhost:8001", "-listen-peer-urls=http://localhost:8000,https://localhost:8001",
"-listen-client-urls=http://localhost:7000,https://localhost:7001", "-listen-client-urls=http://localhost:7000,https://localhost:7001",
"-listen-client-http-urls=http://localhost:7002,https://localhost:7003",
// it should be set if -listen-client-urls is set // it should be set if -listen-client-urls is set
"-advertise-client-urls=http://localhost:7000,https://localhost:7001", "-advertise-client-urls=http://localhost:7000,https://localhost:7001",
} }
@ -57,10 +56,9 @@ func TestConfigFileMemberFields(t *testing.T) {
MaxWalFiles uint `json:"max-wals"` MaxWalFiles uint `json:"max-wals"`
Name string `json:"name"` Name string `json:"name"`
SnapshotCount uint64 `json:"snapshot-count"` SnapshotCount uint64 `json:"snapshot-count"`
ListenPeerUrls string `json:"listen-peer-urls"` LPUrls string `json:"listen-peer-urls"`
ListenClientUrls string `json:"listen-client-urls"` LCUrls string `json:"listen-client-urls"`
ListenClientHttpUrls string `json:"listen-client-http-urls"` AcurlsCfgFile string `json:"advertise-client-urls"`
AdvertiseClientUrls string `json:"advertise-client-urls"`
}{ }{
"testdir", "testdir",
10, 10,
@ -69,7 +67,6 @@ func TestConfigFileMemberFields(t *testing.T) {
10, 10,
"http://localhost:8000,https://localhost:8001", "http://localhost:8000,https://localhost:8001",
"http://localhost:7000,https://localhost:7001", "http://localhost:7000,https://localhost:7001",
"http://localhost:7002,https://localhost:7003",
"http://localhost:7000,https://localhost:7001", "http://localhost:7000,https://localhost:7001",
} }
@ -517,9 +514,8 @@ func mustCreateCfgFile(t *testing.T, b []byte) *os.File {
func validateMemberFlags(t *testing.T, cfg *config) { func validateMemberFlags(t *testing.T, cfg *config) {
wcfg := &embed.Config{ wcfg := &embed.Config{
Dir: "testdir", Dir: "testdir",
ListenPeerUrls: []url.URL{{Scheme: "http", Host: "localhost:8000"}, {Scheme: "https", Host: "localhost:8001"}}, LPUrls: []url.URL{{Scheme: "http", Host: "localhost:8000"}, {Scheme: "https", Host: "localhost:8001"}},
ListenClientUrls: []url.URL{{Scheme: "http", Host: "localhost:7000"}, {Scheme: "https", Host: "localhost:7001"}}, LCUrls: []url.URL{{Scheme: "http", Host: "localhost:7000"}, {Scheme: "https", Host: "localhost:7001"}},
ListenClientHttpUrls: []url.URL{{Scheme: "http", Host: "localhost:7002"}, {Scheme: "https", Host: "localhost:7003"}},
MaxSnapFiles: 10, MaxSnapFiles: 10,
MaxWalFiles: 10, MaxWalFiles: 10,
Name: "testname", Name: "testname",
@ -541,21 +537,18 @@ func validateMemberFlags(t *testing.T, cfg *config) {
if cfg.ec.SnapshotCount != wcfg.SnapshotCount { if cfg.ec.SnapshotCount != wcfg.SnapshotCount {
t.Errorf("snapcount = %v, want %v", cfg.ec.SnapshotCount, wcfg.SnapshotCount) t.Errorf("snapcount = %v, want %v", cfg.ec.SnapshotCount, wcfg.SnapshotCount)
} }
if !reflect.DeepEqual(cfg.ec.ListenPeerUrls, wcfg.ListenPeerUrls) { if !reflect.DeepEqual(cfg.ec.LPUrls, wcfg.LPUrls) {
t.Errorf("listen-peer-urls = %v, want %v", cfg.ec.ListenPeerUrls, wcfg.ListenPeerUrls) t.Errorf("listen-peer-urls = %v, want %v", cfg.ec.LPUrls, wcfg.LPUrls)
} }
if !reflect.DeepEqual(cfg.ec.ListenClientUrls, wcfg.ListenClientUrls) { if !reflect.DeepEqual(cfg.ec.LCUrls, wcfg.LCUrls) {
t.Errorf("listen-client-urls = %v, want %v", cfg.ec.ListenClientUrls, wcfg.ListenClientUrls) t.Errorf("listen-client-urls = %v, want %v", cfg.ec.LCUrls, wcfg.LCUrls)
}
if !reflect.DeepEqual(cfg.ec.ListenClientHttpUrls, wcfg.ListenClientHttpUrls) {
t.Errorf("listen-client-http-urls = %v, want %v", cfg.ec.ListenClientHttpUrls, wcfg.ListenClientHttpUrls)
} }
} }
func validateClusteringFlags(t *testing.T, cfg *config) { func validateClusteringFlags(t *testing.T, cfg *config) {
wcfg := newConfig() wcfg := newConfig()
wcfg.ec.AdvertisePeerUrls = []url.URL{{Scheme: "http", Host: "localhost:8000"}, {Scheme: "https", Host: "localhost:8001"}} wcfg.ec.APUrls = []url.URL{{Scheme: "http", Host: "localhost:8000"}, {Scheme: "https", Host: "localhost:8001"}}
wcfg.ec.AdvertiseClientUrls = []url.URL{{Scheme: "http", Host: "localhost:7000"}, {Scheme: "https", Host: "localhost:7001"}} wcfg.ec.ACUrls = []url.URL{{Scheme: "http", Host: "localhost:7000"}, {Scheme: "https", Host: "localhost:7001"}}
wcfg.ec.ClusterState = embed.ClusterStateFlagExisting wcfg.ec.ClusterState = embed.ClusterStateFlagExisting
wcfg.cf.fallback.Set(fallbackFlagExit) wcfg.cf.fallback.Set(fallbackFlagExit)
wcfg.ec.InitialCluster = "0=http://localhost:8000" wcfg.ec.InitialCluster = "0=http://localhost:8000"
@ -573,11 +566,11 @@ func validateClusteringFlags(t *testing.T, cfg *config) {
if cfg.ec.InitialClusterToken != wcfg.ec.InitialClusterToken { if cfg.ec.InitialClusterToken != wcfg.ec.InitialClusterToken {
t.Errorf("initialClusterToken = %v, want %v", cfg.ec.InitialClusterToken, wcfg.ec.InitialClusterToken) t.Errorf("initialClusterToken = %v, want %v", cfg.ec.InitialClusterToken, wcfg.ec.InitialClusterToken)
} }
if !reflect.DeepEqual(cfg.ec.AdvertisePeerUrls, wcfg.ec.AdvertisePeerUrls) { if !reflect.DeepEqual(cfg.ec.APUrls, wcfg.ec.APUrls) {
t.Errorf("initial-advertise-peer-urls = %v, want %v", cfg.ec.AdvertisePeerUrls, wcfg.ec.AdvertisePeerUrls) t.Errorf("initial-advertise-peer-urls = %v, want %v", cfg.ec.APUrls, wcfg.ec.APUrls)
} }
if !reflect.DeepEqual(cfg.ec.AdvertiseClientUrls, wcfg.ec.AdvertiseClientUrls) { if !reflect.DeepEqual(cfg.ec.ACUrls, wcfg.ec.ACUrls) {
t.Errorf("advertise-client-urls = %v, want %v", cfg.ec.AdvertiseClientUrls, wcfg.ec.AdvertiseClientUrls) t.Errorf("advertise-client-urls = %v, want %v", cfg.ec.ACUrls, wcfg.ec.ACUrls)
} }
} }

View File

@ -251,7 +251,7 @@ func startEtcdOrProxyV2() {
plog.Infof("forgot to set --initial-cluster flag?") plog.Infof("forgot to set --initial-cluster flag?")
} }
} }
if types.URLs(cfg.ec.AdvertisePeerUrls).String() == embed.DefaultInitialAdvertisePeerURLs { if types.URLs(cfg.ec.APUrls).String() == embed.DefaultInitialAdvertisePeerURLs {
if lg != nil { if lg != nil {
lg.Warn("forgot to set --initial-advertise-peer-urls?") lg.Warn("forgot to set --initial-advertise-peer-urls?")
} else { } else {
@ -507,11 +507,11 @@ func startProxy(cfg *config) error {
// setup self signed certs when serving https // setup self signed certs when serving https
cHosts, cTLS := []string{}, false cHosts, cTLS := []string{}, false
for _, u := range cfg.ec.ListenClientUrls { for _, u := range cfg.ec.LCUrls {
cHosts = append(cHosts, u.Host) cHosts = append(cHosts, u.Host)
cTLS = cTLS || u.Scheme == "https" cTLS = cTLS || u.Scheme == "https"
} }
for _, u := range cfg.ec.AdvertiseClientUrls { for _, u := range cfg.ec.ACUrls {
cHosts = append(cHosts, u.Host) cHosts = append(cHosts, u.Host)
cTLS = cTLS || u.Scheme == "https" cTLS = cTLS || u.Scheme == "https"
} }
@ -528,7 +528,7 @@ func startProxy(cfg *config) error {
} }
// Start a proxy server goroutine for each listen address // Start a proxy server goroutine for each listen address
for _, u := range cfg.ec.ListenClientUrls { for _, u := range cfg.ec.LCUrls {
l, err := transport.NewListener(u.Host, u.Scheme, &listenerTLS) l, err := transport.NewListener(u.Host, u.Scheme, &listenerTLS)
if err != nil { if err != nil {
return err return err

View File

@ -38,7 +38,6 @@ import (
pb "go.etcd.io/etcd/etcdserver/etcdserverpb" pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
"go.etcd.io/etcd/pkg/debugutil" "go.etcd.io/etcd/pkg/debugutil"
"go.etcd.io/etcd/pkg/logutil" "go.etcd.io/etcd/pkg/logutil"
"go.etcd.io/etcd/pkg/tlsutil"
"go.etcd.io/etcd/pkg/transport" "go.etcd.io/etcd/pkg/transport"
"go.etcd.io/etcd/proxy/grpcproxy" "go.etcd.io/etcd/proxy/grpcproxy"
@ -46,7 +45,6 @@ import (
"github.com/soheilhy/cmux" "github.com/soheilhy/cmux"
"github.com/spf13/cobra" "github.com/spf13/cobra"
"go.uber.org/zap" "go.uber.org/zap"
"golang.org/x/net/http2"
"google.golang.org/grpc" "google.golang.org/grpc"
"google.golang.org/grpc/grpclog" "google.golang.org/grpc/grpclog"
) )
@ -74,7 +72,6 @@ var (
grpcProxyListenCA string grpcProxyListenCA string
grpcProxyListenCert string grpcProxyListenCert string
grpcProxyListenKey string grpcProxyListenKey string
grpcProxyListenCipherSuites []string
grpcProxyListenAutoTLS bool grpcProxyListenAutoTLS bool
grpcProxyListenCRL string grpcProxyListenCRL string
@ -89,8 +86,6 @@ var (
grpcProxyEnableOrdering bool grpcProxyEnableOrdering bool
grpcProxyDebug bool grpcProxyDebug bool
maxConcurrentStreams uint32
) )
const defaultGRPCMaxCallSendMsgSize = 1.5 * 1024 * 1024 const defaultGRPCMaxCallSendMsgSize = 1.5 * 1024 * 1024
@ -142,7 +137,6 @@ func newGRPCProxyStartCommand() *cobra.Command {
cmd.Flags().StringVar(&grpcProxyListenCert, "cert-file", "", "identify secure connections to the proxy using this TLS certificate file") cmd.Flags().StringVar(&grpcProxyListenCert, "cert-file", "", "identify secure connections to the proxy using this TLS certificate file")
cmd.Flags().StringVar(&grpcProxyListenKey, "key-file", "", "identify secure connections to the proxy using this TLS key file") cmd.Flags().StringVar(&grpcProxyListenKey, "key-file", "", "identify secure connections to the proxy using this TLS key file")
cmd.Flags().StringVar(&grpcProxyListenCA, "trusted-ca-file", "", "verify certificates of TLS-enabled secure proxy using this CA bundle") cmd.Flags().StringVar(&grpcProxyListenCA, "trusted-ca-file", "", "verify certificates of TLS-enabled secure proxy using this CA bundle")
cmd.Flags().StringSliceVar(&grpcProxyListenCipherSuites, "listen-cipher-suites", grpcProxyListenCipherSuites, "Comma-separated list of supported TLS cipher suites between client/proxy (empty will be auto-populated by Go).")
cmd.Flags().BoolVar(&grpcProxyListenAutoTLS, "auto-tls", false, "proxy TLS using generated certificates") cmd.Flags().BoolVar(&grpcProxyListenAutoTLS, "auto-tls", false, "proxy TLS using generated certificates")
cmd.Flags().StringVar(&grpcProxyListenCRL, "client-crl-file", "", "proxy client certificate revocation list file.") cmd.Flags().StringVar(&grpcProxyListenCRL, "client-crl-file", "", "proxy client certificate revocation list file.")
@ -152,8 +146,6 @@ func newGRPCProxyStartCommand() *cobra.Command {
cmd.Flags().BoolVar(&grpcProxyDebug, "debug", false, "Enable debug-level logging for grpc-proxy.") cmd.Flags().BoolVar(&grpcProxyDebug, "debug", false, "Enable debug-level logging for grpc-proxy.")
cmd.Flags().Uint32Var(&maxConcurrentStreams, "max-concurrent-streams", math.MaxUint32, "Maximum concurrent streams that each client can open at a time.")
return &cmd return &cmd
} }
@ -179,27 +171,20 @@ func startGRPCProxy(cmd *cobra.Command, args []string) {
} }
grpclog.SetLoggerV2(gl) grpclog.SetLoggerV2(gl)
tlsInfo := newTLS(grpcProxyListenCA, grpcProxyListenCert, grpcProxyListenKey) tlsinfo := newTLS(grpcProxyListenCA, grpcProxyListenCert, grpcProxyListenKey)
if len(grpcProxyListenCipherSuites) > 0 { if tlsinfo == nil && grpcProxyListenAutoTLS {
cs, err := tlsutil.GetCipherSuites(grpcProxyListenCipherSuites)
if err != nil {
log.Fatal(err)
}
tlsInfo.CipherSuites = cs
}
if tlsInfo == nil && grpcProxyListenAutoTLS {
host := []string{"https://" + grpcProxyListenAddr} host := []string{"https://" + grpcProxyListenAddr}
dir := filepath.Join(grpcProxyDataDir, "fixtures", "proxy") dir := filepath.Join(grpcProxyDataDir, "fixtures", "proxy")
autoTLS, err := transport.SelfCert(lg, dir, host) autoTLS, err := transport.SelfCert(lg, dir, host)
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }
tlsInfo = &autoTLS tlsinfo = &autoTLS
} }
if tlsInfo != nil { if tlsinfo != nil {
lg.Info("gRPC proxy server TLS", zap.String("tls-info", fmt.Sprintf("%+v", tlsInfo))) lg.Info("gRPC proxy server TLS", zap.String("tls-info", fmt.Sprintf("%+v", tlsinfo)))
} }
m := mustListenCMux(lg, tlsInfo) m := mustListenCMux(lg, tlsinfo)
grpcl := m.Match(cmux.HTTP2()) grpcl := m.Match(cmux.HTTP2())
defer func() { defer func() {
grpcl.Close() grpcl.Close()
@ -209,20 +194,13 @@ func startGRPCProxy(cmd *cobra.Command, args []string) {
client := mustNewClient(lg) client := mustNewClient(lg)
httpClient := mustNewHTTPClient(lg) httpClient := mustNewHTTPClient(lg)
srvhttp, httpl := mustHTTPListener(lg, m, tlsInfo, client) srvhttp, httpl := mustHTTPListener(lg, m, tlsinfo, client)
if err := http2.ConfigureServer(srvhttp, &http2.Server{
MaxConcurrentStreams: maxConcurrentStreams,
}); err != nil {
lg.Fatal("Failed to configure the http server", zap.Error(err))
}
errc := make(chan error) errc := make(chan error)
go func() { errc <- newGRPCProxyServer(lg, client).Serve(grpcl) }() go func() { errc <- newGRPCProxyServer(lg, client).Serve(grpcl) }()
go func() { errc <- srvhttp.Serve(httpl) }() go func() { errc <- srvhttp.Serve(httpl) }()
go func() { errc <- m.Serve() }() go func() { errc <- m.Serve() }()
if len(grpcProxyMetricsListenAddr) > 0 { if len(grpcProxyMetricsListenAddr) > 0 {
mhttpl := mustMetricsListener(lg, tlsInfo) mhttpl := mustMetricsListener(lg, tlsinfo)
go func() { go func() {
mux := http.NewServeMux() mux := http.NewServeMux()
grpcproxy.HandleMetrics(mux, httpClient, client.Endpoints()) grpcproxy.HandleMetrics(mux, httpClient, client.Endpoints())
@ -348,7 +326,7 @@ func mustListenCMux(lg *zap.Logger, tlsinfo *transport.TLSInfo) cmux.CMux {
func newGRPCProxyServer(lg *zap.Logger, client *clientv3.Client) *grpc.Server { func newGRPCProxyServer(lg *zap.Logger, client *clientv3.Client) *grpc.Server {
if grpcProxyEnableOrdering { if grpcProxyEnableOrdering {
vf := ordering.NewOrderViolationSwitchEndpointClosure(client) vf := ordering.NewOrderViolationSwitchEndpointClosure(*client)
client.KV = ordering.NewKV(client.KV, vf) client.KV = ordering.NewKV(client.KV, vf)
lg.Info("waiting for linearized read from cluster to recover ordering") lg.Info("waiting for linearized read from cluster to recover ordering")
for { for {
@ -372,12 +350,12 @@ func newGRPCProxyServer(lg *zap.Logger, client *clientv3.Client) *grpc.Server {
} }
kvp, _ := grpcproxy.NewKvProxy(client) kvp, _ := grpcproxy.NewKvProxy(client)
watchp, _ := grpcproxy.NewWatchProxy(client.Ctx(), client) watchp, _ := grpcproxy.NewWatchProxy(client)
if grpcProxyResolverPrefix != "" { if grpcProxyResolverPrefix != "" {
grpcproxy.Register(client, grpcProxyResolverPrefix, grpcProxyAdvertiseClientURL, grpcProxyResolverTTL) grpcproxy.Register(client, grpcProxyResolverPrefix, grpcProxyAdvertiseClientURL, grpcProxyResolverTTL)
} }
clusterp, _ := grpcproxy.NewClusterProxy(client, grpcProxyAdvertiseClientURL, grpcProxyResolverPrefix) clusterp, _ := grpcproxy.NewClusterProxy(client, grpcProxyAdvertiseClientURL, grpcProxyResolverPrefix)
leasep, _ := grpcproxy.NewLeaseProxy(client.Ctx(), client) leasep, _ := grpcproxy.NewLeaseProxy(client)
mainp := grpcproxy.NewMaintenanceProxy(client) mainp := grpcproxy.NewMaintenanceProxy(client)
authp := grpcproxy.NewAuthProxy(client) authp := grpcproxy.NewAuthProxy(client)
electionp := grpcproxy.NewElectionProxy(client) electionp := grpcproxy.NewElectionProxy(client)

View File

@ -62,9 +62,7 @@ Member:
--listen-peer-urls 'http://localhost:2380' --listen-peer-urls 'http://localhost:2380'
List of URLs to listen on for peer traffic. List of URLs to listen on for peer traffic.
--listen-client-urls 'http://localhost:2379' --listen-client-urls 'http://localhost:2379'
List of URLs to listen on for client grpc traffic and http as long as --listen-client-http-urls is not specified. List of URLs to listen on for client traffic.
--listen-client-http-urls ''
List of URLs to listen on for http only client traffic. Enabling this flag removes http services from --listen-client-urls.
--max-snapshots '` + strconv.Itoa(embed.DefaultMaxSnapshots) + `' --max-snapshots '` + strconv.Itoa(embed.DefaultMaxSnapshots) + `'
Maximum number of snapshot files to retain (0 is unlimited). Maximum number of snapshot files to retain (0 is unlimited).
--max-wals '` + strconv.Itoa(embed.DefaultMaxWALs) + `' --max-wals '` + strconv.Itoa(embed.DefaultMaxWALs) + `'
@ -79,8 +77,6 @@ Member:
Maximum number of operations permitted in a transaction. Maximum number of operations permitted in a transaction.
--max-request-bytes '1572864' --max-request-bytes '1572864'
Maximum client request size in bytes the server will accept. Maximum client request size in bytes the server will accept.
--max-concurrent-streams 'math.MaxUint32'
Maximum concurrent streams that each client can open at a time.
--grpc-keepalive-min-time '5s' --grpc-keepalive-min-time '5s'
Minimum duration interval that a client should wait before pinging server. Minimum duration interval that a client should wait before pinging server.
--grpc-keepalive-interval '2h' --grpc-keepalive-interval '2h'
@ -94,8 +90,7 @@ Clustering:
--initial-cluster 'default=http://localhost:2380' --initial-cluster 'default=http://localhost:2380'
Initial cluster configuration for bootstrapping. Initial cluster configuration for bootstrapping.
--initial-cluster-state 'new' --initial-cluster-state 'new'
Initial cluster state ('new' when bootstrapping a new cluster or 'existing' when adding new members to an existing cluster). Initial cluster state ('new' or 'existing').
After successful initialization (bootstrapping or adding), flag is ignored on restarts.
--initial-cluster-token 'etcd-cluster' --initial-cluster-token 'etcd-cluster'
Initial cluster token for the etcd cluster during bootstrap. Initial cluster token for the etcd cluster during bootstrap.
Specifying this can protect you from unintended cross-cluster interaction when running multiple clusters. Specifying this can protect you from unintended cross-cluster interaction when running multiple clusters.
@ -161,10 +156,6 @@ Security:
Comma-separated whitelist of origins for CORS, or cross-origin resource sharing, (empty or * means allow all). Comma-separated whitelist of origins for CORS, or cross-origin resource sharing, (empty or * means allow all).
--host-whitelist '*' --host-whitelist '*'
Acceptable hostnames from HTTP client requests, if server is not secure (empty or * means allow all). Acceptable hostnames from HTTP client requests, if server is not secure (empty or * means allow all).
--tls-min-version 'TLS1.2'
Minimum TLS version supported by etcd. Possible values: TLS1.2, TLS1.3.
--tls-max-version ''
Maximum TLS version supported by etcd. Possible values: TLS1.2, TLS1.3 (empty will be auto-populated by Go).
Auth: Auth:
--auth-token 'simple' --auth-token 'simple'
@ -221,8 +212,6 @@ Experimental feature:
Skip verification of SAN field in client certificate for peer connections. Skip verification of SAN field in client certificate for peer connections.
--experimental-watch-progress-notify-interval '10m' --experimental-watch-progress-notify-interval '10m'
Duration of periodical watch progress notification. Duration of periodical watch progress notification.
--experimental-warning-apply-duration '100ms'
Warning is generated if requests take more than this duration.
Unsafe feature: Unsafe feature:
--force-new-cluster 'false' --force-new-cluster 'false'

View File

@ -36,7 +36,7 @@ const (
// HandleMetricsHealth registers metrics and health handlers. // HandleMetricsHealth registers metrics and health handlers.
func HandleMetricsHealth(mux *http.ServeMux, srv etcdserver.ServerV2) { func HandleMetricsHealth(mux *http.ServeMux, srv etcdserver.ServerV2) {
mux.Handle(PathMetrics, promhttp.Handler()) mux.Handle(PathMetrics, promhttp.Handler())
mux.Handle(PathHealth, NewHealthHandler(func(excludedAlarms AlarmSet) Health { return checkHealth(srv, excludedAlarms) })) mux.Handle(PathHealth, NewHealthHandler(func() Health { return checkHealth(srv) }))
} }
// HandlePrometheus registers prometheus handler on '/metrics'. // HandlePrometheus registers prometheus handler on '/metrics'.
@ -45,7 +45,7 @@ func HandlePrometheus(mux *http.ServeMux) {
} }
// NewHealthHandler handles '/health' requests. // NewHealthHandler handles '/health' requests.
func NewHealthHandler(hfunc func(excludedAlarms AlarmSet) Health) http.HandlerFunc { func NewHealthHandler(hfunc func() Health) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) { return func(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodGet { if r.Method != http.MethodGet {
w.Header().Set("Allow", http.MethodGet) w.Header().Set("Allow", http.MethodGet)
@ -53,8 +53,7 @@ func NewHealthHandler(hfunc func(excludedAlarms AlarmSet) Health) http.HandlerFu
plog.Warningf("/health error (status code %d)", http.StatusMethodNotAllowed) plog.Warningf("/health error (status code %d)", http.StatusMethodNotAllowed)
return return
} }
excludedAlarms := getExcludedAlarms(r) h := hfunc()
h := hfunc(excludedAlarms)
d, _ := json.Marshal(h) d, _ := json.Marshal(h)
if h.Health != "true" { if h.Health != "true" {
http.Error(w, string(d), http.StatusServiceUnavailable) http.Error(w, string(d), http.StatusServiceUnavailable)
@ -91,38 +90,16 @@ type Health struct {
Health string `json:"health"` Health string `json:"health"`
} }
type AlarmSet map[string]struct{}
func getExcludedAlarms(r *http.Request) (alarms AlarmSet) {
alarms = make(map[string]struct{}, 2)
alms, found := r.URL.Query()["exclude"]
if found {
for _, alm := range alms {
if len(alms) == 0 {
continue
}
alarms[alm] = struct{}{}
}
}
return alarms
}
// TODO: server NOSPACE, etcdserver.ErrNoLeader in health API // TODO: server NOSPACE, etcdserver.ErrNoLeader in health API
func checkHealth(srv etcdserver.ServerV2, excludedAlarms AlarmSet) Health { func checkHealth(srv etcdserver.ServerV2) Health {
h := Health{Health: "true"} h := Health{Health: "true"}
as := srv.Alarms() as := srv.Alarms()
if len(as) > 0 { if len(as) > 0 {
for _, v := range as {
alarmName := v.Alarm.String()
if _, found := excludedAlarms[alarmName]; found {
plog.Debugf("/health excluded alarm %s", v.String())
continue
}
h.Health = "false" h.Health = "false"
plog.Warningf("/health error due to %s", v.String()) for _, v := range as {
return h plog.Warningf("/health error due to an alarm %s", v.String())
} }
} }
@ -145,7 +122,7 @@ func checkHealth(srv etcdserver.ServerV2, excludedAlarms AlarmSet) Health {
if h.Health == "true" { if h.Health == "true" {
healthSuccess.Inc() healthSuccess.Inc()
plog.Debugf("/health OK (status code %d)", http.StatusOK) plog.Infof("/health OK (status code %d)", http.StatusOK)
} else { } else {
healthFailed.Inc() healthFailed.Inc()
} }

View File

@ -1,157 +0,0 @@
// Copyright 2021 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package etcdhttp
import (
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"testing"
"go.etcd.io/etcd/etcdserver"
stats "go.etcd.io/etcd/etcdserver/api/v2stats"
pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
"go.etcd.io/etcd/pkg/testutil"
"go.etcd.io/etcd/pkg/types"
"go.etcd.io/etcd/raft"
)
type fakeStats struct{}
func (s *fakeStats) SelfStats() []byte { return nil }
func (s *fakeStats) LeaderStats() []byte { return nil }
func (s *fakeStats) StoreStats() []byte { return nil }
type fakeServerV2 struct {
fakeServer
stats.Stats
health string
}
func (s *fakeServerV2) Leader() types.ID {
if s.health == "true" {
return 1
}
return types.ID(raft.None)
}
func (s *fakeServerV2) Do(ctx context.Context, r pb.Request) (etcdserver.Response, error) {
if s.health == "true" {
return etcdserver.Response{}, nil
}
return etcdserver.Response{}, fmt.Errorf("fail health check")
}
func (s *fakeServerV2) ClientCertAuthEnabled() bool { return false }
func TestHealthHandler(t *testing.T) {
// define the input and expected output
// input: alarms, and healthCheckURL
tests := []struct {
alarms []*pb.AlarmMember
healthCheckURL string
statusCode int
health string
}{
{
[]*pb.AlarmMember{},
"/health",
http.StatusOK,
"true",
},
{
[]*pb.AlarmMember{{MemberID: uint64(0), Alarm: pb.AlarmType_NOSPACE}},
"/health",
http.StatusServiceUnavailable,
"false",
},
{
[]*pb.AlarmMember{{MemberID: uint64(0), Alarm: pb.AlarmType_NOSPACE}},
"/health?exclude=NOSPACE",
http.StatusOK,
"true",
},
{
[]*pb.AlarmMember{},
"/health?exclude=NOSPACE",
http.StatusOK,
"true",
},
{
[]*pb.AlarmMember{{MemberID: uint64(1), Alarm: pb.AlarmType_NOSPACE}, {MemberID: uint64(2), Alarm: pb.AlarmType_NOSPACE}, {MemberID: uint64(3), Alarm: pb.AlarmType_NOSPACE}},
"/health?exclude=NOSPACE",
http.StatusOK,
"true",
},
{
[]*pb.AlarmMember{{MemberID: uint64(0), Alarm: pb.AlarmType_NOSPACE}, {MemberID: uint64(1), Alarm: pb.AlarmType_CORRUPT}},
"/health?exclude=NOSPACE",
http.StatusServiceUnavailable,
"false",
},
{
[]*pb.AlarmMember{{MemberID: uint64(0), Alarm: pb.AlarmType_NOSPACE}, {MemberID: uint64(1), Alarm: pb.AlarmType_CORRUPT}},
"/health?exclude=NOSPACE&exclude=CORRUPT",
http.StatusOK,
"true",
},
}
for i, tt := range tests {
func() {
mux := http.NewServeMux()
HandleMetricsHealth(mux, &fakeServerV2{
fakeServer: fakeServer{alarms: tt.alarms},
Stats: &fakeStats{},
health: tt.health,
})
ts := httptest.NewServer(mux)
defer ts.Close()
res, err := ts.Client().Do(&http.Request{Method: http.MethodGet, URL: testutil.MustNewURL(t, ts.URL+tt.healthCheckURL)})
if err != nil {
t.Errorf("fail serve http request %s %v in test case #%d", tt.healthCheckURL, err, i+1)
}
if res == nil {
t.Errorf("got nil http response with http request %s in test case #%d", tt.healthCheckURL, i+1)
return
}
if res.StatusCode != tt.statusCode {
t.Errorf("want statusCode %d but got %d in test case #%d", tt.statusCode, res.StatusCode, i+1)
}
health, err := parseHealthOutput(res.Body)
if err != nil {
t.Errorf("fail parse health check output %v", err)
}
if health.Health != tt.health {
t.Errorf("want health %s but got %s", tt.health, health.Health)
}
}()
}
}
func parseHealthOutput(body io.Reader) (Health, error) {
obj := Health{}
d, derr := ioutil.ReadAll(body)
if derr != nil {
return obj, derr
}
if err := json.Unmarshal(d, &obj); err != nil {
return obj, err
}
return obj, nil
}

View File

@ -58,7 +58,6 @@ func (c *fakeCluster) Version() *semver.Version { return nil }
type fakeServer struct { type fakeServer struct {
cluster api.Cluster cluster api.Cluster
alarms []*pb.AlarmMember
} }
func (s *fakeServer) AddMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) { func (s *fakeServer) AddMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) {
@ -75,7 +74,7 @@ func (s *fakeServer) PromoteMember(ctx context.Context, id uint64) ([]*membershi
} }
func (s *fakeServer) ClusterVersion() *semver.Version { return nil } func (s *fakeServer) ClusterVersion() *semver.Version { return nil }
func (s *fakeServer) Cluster() api.Cluster { return s.cluster } func (s *fakeServer) Cluster() api.Cluster { return s.cluster }
func (s *fakeServer) Alarms() []*pb.AlarmMember { return s.alarms } func (s *fakeServer) Alarms() []*pb.AlarmMember { return nil }
var fakeRaftHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { var fakeRaftHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte("test data")) w.Write([]byte("test data"))

View File

@ -763,21 +763,16 @@ func ValidateClusterAndAssignIDs(lg *zap.Logger, local *RaftCluster, existing *R
if len(ems) != len(lms) { if len(ems) != len(lms) {
return fmt.Errorf("member count is unequal") return fmt.Errorf("member count is unequal")
} }
sort.Sort(MembersByPeerURLs(ems))
sort.Sort(MembersByPeerURLs(lms))
ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second) ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second)
defer cancel() defer cancel()
for i := range ems { for i := range ems {
var err error if ok, err := netutil.URLStringsEqual(ctx, lg, ems[i].PeerURLs, lms[i].PeerURLs); !ok {
ok := false return fmt.Errorf("unmatched member while checking PeerURLs (%v)", err)
for j := range lms {
if ok, err = netutil.URLStringsEqual(ctx, lg, ems[i].PeerURLs, lms[j].PeerURLs); ok {
lms[j].ID = ems[i].ID
break
}
}
if !ok {
return fmt.Errorf("PeerURLs: no match found for existing member (%v, %v), last resolver error (%v)", ems[i].ID, ems[i].PeerURLs, err)
} }
lms[i].ID = ems[i].ID
} }
local.members = make(map[types.ID]*Member) local.members = make(map[types.ID]*Member)
for _, m := range lms { for _, m := range lms {

View File

@ -5,11 +5,9 @@
Package snappb is a generated protocol buffer package. Package snappb is a generated protocol buffer package.
It is generated from these files: It is generated from these files:
snap.proto snap.proto
It has these top-level messages: It has these top-level messages:
Snapshot Snapshot
*/ */
package snappb package snappb

View File

@ -104,5 +104,5 @@ func TestNodeExternClone(t *testing.T) {
func sameSlice(a, b []*NodeExtern) bool { func sameSlice(a, b []*NodeExtern) bool {
ah := (*reflect.SliceHeader)(unsafe.Pointer(&a)) ah := (*reflect.SliceHeader)(unsafe.Pointer(&a))
bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
return ah.Data == bh.Data && ah.Len == bh.Len && ah.Cap == bh.Cap return *ah == *bh
} }

View File

@ -844,7 +844,7 @@ func TestStoreWatchSlowConsumer(t *testing.T) {
s.Watch("/foo", true, true, 0) // stream must be true s.Watch("/foo", true, true, 0) // stream must be true
// Fill watch channel with 100 events // Fill watch channel with 100 events
for i := 1; i <= 100; i++ { for i := 1; i <= 100; i++ {
s.Set("/foo", false, string(rune(i)), v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) // ok s.Set("/foo", false, string(i), v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) // ok
} }
// testutil.AssertEqual(t, s.WatcherHub.count, int64(1)) // testutil.AssertEqual(t, s.WatcherHub.count, int64(1))
s.Set("/foo", false, "101", v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) // ok s.Set("/foo", false, "101", v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) // ok

View File

@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
//go:build !v2v3
// +build !v2v3 // +build !v2v3
package v2store_test package v2store_test

View File

@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
//go:build v2v3
// +build v2v3 // +build v2v3
package v2store_test package v2store_test

View File

@ -41,4 +41,5 @@
// if err != nil { // if err != nil {
// // handle error! // // handle error!
// } // }
//
package v3client package v3client

View File

@ -5,11 +5,9 @@
Package v3electionpb is a generated protocol buffer package. Package v3electionpb is a generated protocol buffer package.
It is generated from these files: It is generated from these files:
v3election.proto v3election.proto
It has these top-level messages: It has these top-level messages:
CampaignRequest CampaignRequest
CampaignResponse CampaignResponse
LeaderKey LeaderKey

View File

@ -5,11 +5,9 @@
Package v3lockpb is a generated protocol buffer package. Package v3lockpb is a generated protocol buffer package.
It is generated from these files: It is generated from these files:
v3lock.proto v3lock.proto
It has these top-level messages: It has these top-level messages:
LockRequest LockRequest
LockResponse LockResponse
UnlockRequest UnlockRequest

View File

@ -31,6 +31,7 @@ import (
const ( const (
grpcOverheadBytes = 512 * 1024 grpcOverheadBytes = 512 * 1024
maxStreams = math.MaxUint32
maxSendBytes = math.MaxInt32 maxSendBytes = math.MaxInt32
) )
@ -52,7 +53,7 @@ func Server(s *etcdserver.EtcdServer, tls *tls.Config, gopts ...grpc.ServerOptio
))) )))
opts = append(opts, grpc.MaxRecvMsgSize(int(s.Cfg.MaxRequestBytes+grpcOverheadBytes))) opts = append(opts, grpc.MaxRecvMsgSize(int(s.Cfg.MaxRequestBytes+grpcOverheadBytes)))
opts = append(opts, grpc.MaxSendMsgSize(maxSendBytes)) opts = append(opts, grpc.MaxSendMsgSize(maxSendBytes))
opts = append(opts, grpc.MaxConcurrentStreams(s.Cfg.MaxConcurrentStreams)) opts = append(opts, grpc.MaxConcurrentStreams(maxStreams))
grpcServer := grpc.NewServer(append(opts, gopts...)...) grpcServer := grpc.NewServer(append(opts, gopts...)...)
pb.RegisterKVServer(grpcServer, NewQuotaKVServer(s)) pb.RegisterKVServer(grpcServer, NewQuotaKVServer(s))

View File

@ -217,8 +217,8 @@ func newStreamInterceptor(s *etcdserver.EtcdServer) grpc.StreamServerInterceptor
return rpctypes.ErrGRPCNoLeader return rpctypes.ErrGRPCNoLeader
} }
ctx := newCancellableContext(ss.Context()) cctx, cancel := context.WithCancel(ss.Context())
ss = serverStreamWithCtx{ctx: ctx, ServerStream: ss} ss = serverStreamWithCtx{ctx: cctx, cancel: &cancel, ServerStream: ss}
smap.mu.Lock() smap.mu.Lock()
smap.streams[ss] = struct{}{} smap.streams[ss] = struct{}{}
@ -228,8 +228,7 @@ func newStreamInterceptor(s *etcdserver.EtcdServer) grpc.StreamServerInterceptor
smap.mu.Lock() smap.mu.Lock()
delete(smap.streams, ss) delete(smap.streams, ss)
smap.mu.Unlock() smap.mu.Unlock()
// TODO: investigate whether the reason for cancellation here is useful to know cancel()
ctx.Cancel(nil)
}() }()
} }
} }
@ -238,52 +237,10 @@ func newStreamInterceptor(s *etcdserver.EtcdServer) grpc.StreamServerInterceptor
} }
} }
// cancellableContext wraps a context with new cancellable context that allows a
// specific cancellation error to be preserved and later retrieved using the
// Context.Err() function. This is so downstream context users can disambiguate
// the reason for the cancellation which could be from the client (for example)
// or from this interceptor code.
type cancellableContext struct {
context.Context
lock sync.RWMutex
cancel context.CancelFunc
cancelReason error
}
func newCancellableContext(parent context.Context) *cancellableContext {
ctx, cancel := context.WithCancel(parent)
return &cancellableContext{
Context: ctx,
cancel: cancel,
}
}
// Cancel stores the cancellation reason and then delegates to context.WithCancel
// against the parent context.
func (c *cancellableContext) Cancel(reason error) {
c.lock.Lock()
c.cancelReason = reason
c.lock.Unlock()
c.cancel()
}
// Err will return the preserved cancel reason error if present, and will
// otherwise return the underlying error from the parent context.
func (c *cancellableContext) Err() error {
c.lock.RLock()
defer c.lock.RUnlock()
if c.cancelReason != nil {
return c.cancelReason
}
return c.Context.Err()
}
type serverStreamWithCtx struct { type serverStreamWithCtx struct {
grpc.ServerStream grpc.ServerStream
ctx context.Context
// ctx is used so that we can preserve a reason for cancellation. cancel *context.CancelFunc
ctx *cancellableContext
} }
func (ssc serverStreamWithCtx) Context() context.Context { return ssc.ctx } func (ssc serverStreamWithCtx) Context() context.Context { return ssc.ctx }
@ -315,7 +272,7 @@ func monitorLeader(s *etcdserver.EtcdServer) *streamsMap {
smap.mu.Lock() smap.mu.Lock()
for ss := range smap.streams { for ss := range smap.streams {
if ssWithCtx, ok := ss.(serverStreamWithCtx); ok { if ssWithCtx, ok := ss.(serverStreamWithCtx); ok {
ssWithCtx.ctx.Cancel(rpctypes.ErrGRPCNoLeader) (*ssWithCtx.cancel)()
<-ss.Context().Done() <-ss.Context().Done()
} }
} }

View File

@ -35,8 +35,6 @@ var (
ErrGRPCLeaseExist = status.New(codes.FailedPrecondition, "etcdserver: lease already exists").Err() ErrGRPCLeaseExist = status.New(codes.FailedPrecondition, "etcdserver: lease already exists").Err()
ErrGRPCLeaseTTLTooLarge = status.New(codes.OutOfRange, "etcdserver: too large lease TTL").Err() ErrGRPCLeaseTTLTooLarge = status.New(codes.OutOfRange, "etcdserver: too large lease TTL").Err()
ErrGRPCWatchCanceled = status.New(codes.Canceled, "etcdserver: watch canceled").Err()
ErrGRPCMemberExist = status.New(codes.FailedPrecondition, "etcdserver: member ID already exist").Err() ErrGRPCMemberExist = status.New(codes.FailedPrecondition, "etcdserver: member ID already exist").Err()
ErrGRPCPeerURLExist = status.New(codes.FailedPrecondition, "etcdserver: Peer URLs already exists").Err() ErrGRPCPeerURLExist = status.New(codes.FailedPrecondition, "etcdserver: Peer URLs already exists").Err()
ErrGRPCMemberNotEnoughStarted = status.New(codes.FailedPrecondition, "etcdserver: re-configuration failed due to not enough started members").Err() ErrGRPCMemberNotEnoughStarted = status.New(codes.FailedPrecondition, "etcdserver: re-configuration failed due to not enough started members").Err()
@ -58,14 +56,12 @@ var (
ErrGRPCRoleNotFound = status.New(codes.FailedPrecondition, "etcdserver: role name not found").Err() ErrGRPCRoleNotFound = status.New(codes.FailedPrecondition, "etcdserver: role name not found").Err()
ErrGRPCRoleEmpty = status.New(codes.InvalidArgument, "etcdserver: role name is empty").Err() ErrGRPCRoleEmpty = status.New(codes.InvalidArgument, "etcdserver: role name is empty").Err()
ErrGRPCAuthFailed = status.New(codes.InvalidArgument, "etcdserver: authentication failed, invalid user ID or password").Err() ErrGRPCAuthFailed = status.New(codes.InvalidArgument, "etcdserver: authentication failed, invalid user ID or password").Err()
ErrGRPCPermissionNotGiven = status.New(codes.InvalidArgument, "etcdserver: permission not given").Err()
ErrGRPCPermissionDenied = status.New(codes.PermissionDenied, "etcdserver: permission denied").Err() ErrGRPCPermissionDenied = status.New(codes.PermissionDenied, "etcdserver: permission denied").Err()
ErrGRPCRoleNotGranted = status.New(codes.FailedPrecondition, "etcdserver: role is not granted to the user").Err() ErrGRPCRoleNotGranted = status.New(codes.FailedPrecondition, "etcdserver: role is not granted to the user").Err()
ErrGRPCPermissionNotGranted = status.New(codes.FailedPrecondition, "etcdserver: permission is not granted to the role").Err() ErrGRPCPermissionNotGranted = status.New(codes.FailedPrecondition, "etcdserver: permission is not granted to the role").Err()
ErrGRPCAuthNotEnabled = status.New(codes.FailedPrecondition, "etcdserver: authentication is not enabled").Err() ErrGRPCAuthNotEnabled = status.New(codes.FailedPrecondition, "etcdserver: authentication is not enabled").Err()
ErrGRPCInvalidAuthToken = status.New(codes.Unauthenticated, "etcdserver: invalid auth token").Err() ErrGRPCInvalidAuthToken = status.New(codes.Unauthenticated, "etcdserver: invalid auth token").Err()
ErrGRPCInvalidAuthMgmt = status.New(codes.InvalidArgument, "etcdserver: invalid auth management").Err() ErrGRPCInvalidAuthMgmt = status.New(codes.InvalidArgument, "etcdserver: invalid auth management").Err()
ErrGRPCAuthOldRevision = status.New(codes.InvalidArgument, "etcdserver: revision of auth store is old").Err()
ErrGRPCNoLeader = status.New(codes.Unavailable, "etcdserver: no leader").Err() ErrGRPCNoLeader = status.New(codes.Unavailable, "etcdserver: no leader").Err()
ErrGRPCNotLeader = status.New(codes.FailedPrecondition, "etcdserver: not leader").Err() ErrGRPCNotLeader = status.New(codes.FailedPrecondition, "etcdserver: not leader").Err()
@ -75,7 +71,6 @@ var (
ErrGRPCTimeout = status.New(codes.Unavailable, "etcdserver: request timed out").Err() ErrGRPCTimeout = status.New(codes.Unavailable, "etcdserver: request timed out").Err()
ErrGRPCTimeoutDueToLeaderFail = status.New(codes.Unavailable, "etcdserver: request timed out, possibly due to previous leader failure").Err() ErrGRPCTimeoutDueToLeaderFail = status.New(codes.Unavailable, "etcdserver: request timed out, possibly due to previous leader failure").Err()
ErrGRPCTimeoutDueToConnectionLost = status.New(codes.Unavailable, "etcdserver: request timed out, possibly due to connection lost").Err() ErrGRPCTimeoutDueToConnectionLost = status.New(codes.Unavailable, "etcdserver: request timed out, possibly due to connection lost").Err()
ErrGRPCTimeoutWaitAppliedIndex = status.New(codes.Unavailable, "etcdserver: request timed out, waiting for the applied index took too long").Err()
ErrGRPCUnhealthy = status.New(codes.Unavailable, "etcdserver: unhealthy cluster").Err() ErrGRPCUnhealthy = status.New(codes.Unavailable, "etcdserver: unhealthy cluster").Err()
ErrGRPCCorrupt = status.New(codes.DataLoss, "etcdserver: corrupt cluster").Err() ErrGRPCCorrupt = status.New(codes.DataLoss, "etcdserver: corrupt cluster").Err()
ErrGPRCNotSupportedForLearner = status.New(codes.Unavailable, "etcdserver: rpc not supported for learner").Err() ErrGPRCNotSupportedForLearner = status.New(codes.Unavailable, "etcdserver: rpc not supported for learner").Err()
@ -124,7 +119,6 @@ var (
ErrorDesc(ErrGRPCAuthNotEnabled): ErrGRPCAuthNotEnabled, ErrorDesc(ErrGRPCAuthNotEnabled): ErrGRPCAuthNotEnabled,
ErrorDesc(ErrGRPCInvalidAuthToken): ErrGRPCInvalidAuthToken, ErrorDesc(ErrGRPCInvalidAuthToken): ErrGRPCInvalidAuthToken,
ErrorDesc(ErrGRPCInvalidAuthMgmt): ErrGRPCInvalidAuthMgmt, ErrorDesc(ErrGRPCInvalidAuthMgmt): ErrGRPCInvalidAuthMgmt,
ErrorDesc(ErrGRPCAuthOldRevision): ErrGRPCAuthOldRevision,
ErrorDesc(ErrGRPCNoLeader): ErrGRPCNoLeader, ErrorDesc(ErrGRPCNoLeader): ErrGRPCNoLeader,
ErrorDesc(ErrGRPCNotLeader): ErrGRPCNotLeader, ErrorDesc(ErrGRPCNotLeader): ErrGRPCNotLeader,
@ -134,7 +128,6 @@ var (
ErrorDesc(ErrGRPCTimeout): ErrGRPCTimeout, ErrorDesc(ErrGRPCTimeout): ErrGRPCTimeout,
ErrorDesc(ErrGRPCTimeoutDueToLeaderFail): ErrGRPCTimeoutDueToLeaderFail, ErrorDesc(ErrGRPCTimeoutDueToLeaderFail): ErrGRPCTimeoutDueToLeaderFail,
ErrorDesc(ErrGRPCTimeoutDueToConnectionLost): ErrGRPCTimeoutDueToConnectionLost, ErrorDesc(ErrGRPCTimeoutDueToConnectionLost): ErrGRPCTimeoutDueToConnectionLost,
ErrorDesc(ErrGRPCTimeoutWaitAppliedIndex): ErrGRPCTimeoutWaitAppliedIndex,
ErrorDesc(ErrGRPCUnhealthy): ErrGRPCUnhealthy, ErrorDesc(ErrGRPCUnhealthy): ErrGRPCUnhealthy,
ErrorDesc(ErrGRPCCorrupt): ErrGRPCCorrupt, ErrorDesc(ErrGRPCCorrupt): ErrGRPCCorrupt,
ErrorDesc(ErrGPRCNotSupportedForLearner): ErrGPRCNotSupportedForLearner, ErrorDesc(ErrGPRCNotSupportedForLearner): ErrGPRCNotSupportedForLearner,
@ -184,7 +177,6 @@ var (
ErrPermissionNotGranted = Error(ErrGRPCPermissionNotGranted) ErrPermissionNotGranted = Error(ErrGRPCPermissionNotGranted)
ErrAuthNotEnabled = Error(ErrGRPCAuthNotEnabled) ErrAuthNotEnabled = Error(ErrGRPCAuthNotEnabled)
ErrInvalidAuthToken = Error(ErrGRPCInvalidAuthToken) ErrInvalidAuthToken = Error(ErrGRPCInvalidAuthToken)
ErrAuthOldRevision = Error(ErrGRPCAuthOldRevision)
ErrInvalidAuthMgmt = Error(ErrGRPCInvalidAuthMgmt) ErrInvalidAuthMgmt = Error(ErrGRPCInvalidAuthMgmt)
ErrNoLeader = Error(ErrGRPCNoLeader) ErrNoLeader = Error(ErrGRPCNoLeader)
@ -195,7 +187,6 @@ var (
ErrTimeout = Error(ErrGRPCTimeout) ErrTimeout = Error(ErrGRPCTimeout)
ErrTimeoutDueToLeaderFail = Error(ErrGRPCTimeoutDueToLeaderFail) ErrTimeoutDueToLeaderFail = Error(ErrGRPCTimeoutDueToLeaderFail)
ErrTimeoutDueToConnectionLost = Error(ErrGRPCTimeoutDueToConnectionLost) ErrTimeoutDueToConnectionLost = Error(ErrGRPCTimeoutDueToConnectionLost)
ErrTimeoutWaitAppliedIndex = Error(ErrGRPCTimeoutWaitAppliedIndex)
ErrUnhealthy = Error(ErrGRPCUnhealthy) ErrUnhealthy = Error(ErrGRPCUnhealthy)
ErrCorrupt = Error(ErrGRPCCorrupt) ErrCorrupt = Error(ErrGRPCCorrupt)
ErrBadLeaderTransferee = Error(ErrGRPCBadLeaderTransferee) ErrBadLeaderTransferee = Error(ErrGRPCBadLeaderTransferee)

View File

@ -53,7 +53,6 @@ var toGRPCErrorMap = map[error]error{
etcdserver.ErrTimeout: rpctypes.ErrGRPCTimeout, etcdserver.ErrTimeout: rpctypes.ErrGRPCTimeout,
etcdserver.ErrTimeoutDueToLeaderFail: rpctypes.ErrGRPCTimeoutDueToLeaderFail, etcdserver.ErrTimeoutDueToLeaderFail: rpctypes.ErrGRPCTimeoutDueToLeaderFail,
etcdserver.ErrTimeoutDueToConnectionLost: rpctypes.ErrGRPCTimeoutDueToConnectionLost, etcdserver.ErrTimeoutDueToConnectionLost: rpctypes.ErrGRPCTimeoutDueToConnectionLost,
etcdserver.ErrTimeoutWaitAppliedIndex: rpctypes.ErrGRPCTimeoutWaitAppliedIndex,
etcdserver.ErrUnhealthy: rpctypes.ErrGRPCUnhealthy, etcdserver.ErrUnhealthy: rpctypes.ErrGRPCUnhealthy,
etcdserver.ErrKeyNotFound: rpctypes.ErrGRPCKeyNotFound, etcdserver.ErrKeyNotFound: rpctypes.ErrGRPCKeyNotFound,
etcdserver.ErrCorrupt: rpctypes.ErrGRPCCorrupt, etcdserver.ErrCorrupt: rpctypes.ErrGRPCCorrupt,
@ -72,14 +71,12 @@ var toGRPCErrorMap = map[error]error{
auth.ErrRoleNotFound: rpctypes.ErrGRPCRoleNotFound, auth.ErrRoleNotFound: rpctypes.ErrGRPCRoleNotFound,
auth.ErrRoleEmpty: rpctypes.ErrGRPCRoleEmpty, auth.ErrRoleEmpty: rpctypes.ErrGRPCRoleEmpty,
auth.ErrAuthFailed: rpctypes.ErrGRPCAuthFailed, auth.ErrAuthFailed: rpctypes.ErrGRPCAuthFailed,
auth.ErrPermissionNotGiven: rpctypes.ErrGRPCPermissionNotGiven,
auth.ErrPermissionDenied: rpctypes.ErrGRPCPermissionDenied, auth.ErrPermissionDenied: rpctypes.ErrGRPCPermissionDenied,
auth.ErrRoleNotGranted: rpctypes.ErrGRPCRoleNotGranted, auth.ErrRoleNotGranted: rpctypes.ErrGRPCRoleNotGranted,
auth.ErrPermissionNotGranted: rpctypes.ErrGRPCPermissionNotGranted, auth.ErrPermissionNotGranted: rpctypes.ErrGRPCPermissionNotGranted,
auth.ErrAuthNotEnabled: rpctypes.ErrGRPCAuthNotEnabled, auth.ErrAuthNotEnabled: rpctypes.ErrGRPCAuthNotEnabled,
auth.ErrInvalidAuthToken: rpctypes.ErrGRPCInvalidAuthToken, auth.ErrInvalidAuthToken: rpctypes.ErrGRPCInvalidAuthToken,
auth.ErrInvalidAuthMgmt: rpctypes.ErrGRPCInvalidAuthMgmt, auth.ErrInvalidAuthMgmt: rpctypes.ErrGRPCInvalidAuthMgmt,
auth.ErrAuthOldRevision: rpctypes.ErrGRPCAuthOldRevision,
} }
func togRPCError(err error) error { func togRPCError(err error) error {

View File

@ -16,14 +16,12 @@ package v3rpc
import ( import (
"context" "context"
"fmt"
"io" "io"
"math/rand" "math/rand"
"sync" "sync"
"time" "time"
"go.etcd.io/etcd/auth" "go.etcd.io/etcd/auth"
"go.etcd.io/etcd/clientv3"
"go.etcd.io/etcd/etcdserver" "go.etcd.io/etcd/etcdserver"
"go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes" "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
pb "go.etcd.io/etcd/etcdserver/etcdserverpb" pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
@ -145,10 +143,6 @@ type serverWatchStream struct {
// records fragmented watch IDs // records fragmented watch IDs
fragment map[mvcc.WatchID]bool fragment map[mvcc.WatchID]bool
// indicates whether we have an outstanding global progress
// notification to send
deferredProgress bool
// closec indicates the stream is closed. // closec indicates the stream is closed.
closec chan struct{} closec chan struct{}
@ -178,8 +172,6 @@ func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) {
prevKV: make(map[mvcc.WatchID]bool), prevKV: make(map[mvcc.WatchID]bool),
fragment: make(map[mvcc.WatchID]bool), fragment: make(map[mvcc.WatchID]bool),
deferredProgress: false,
closec: make(chan struct{}), closec: make(chan struct{}),
} }
@ -214,25 +206,15 @@ func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) {
} }
}() }()
// TODO: There's a race here. When a stream is closed (e.g. due to a cancellation),
// the underlying error (e.g. a gRPC stream error) may be returned and handled
// through errc if the recv goroutine finishes before the send goroutine.
// When the recv goroutine wins, the stream error is retained. When recv loses
// the race, the underlying error is lost (unless the root error is propagated
// through Context.Err() which is not always the case (as callers have to decide
// to implement a custom context to do so). The stdlib context package builtins
// may be insufficient to carry semantically useful errors around and should be
// revisited.
select { select {
case err = <-errc: case err = <-errc:
if err == context.Canceled {
err = rpctypes.ErrGRPCWatchCanceled
}
close(sws.ctrlStream) close(sws.ctrlStream)
case <-stream.Context().Done(): case <-stream.Context().Done():
err = stream.Context().Err() err = stream.Context().Err()
// the only server-side cancellation is noleader for now.
if err == context.Canceled { if err == context.Canceled {
err = rpctypes.ErrGRPCWatchCanceled err = rpctypes.ErrGRPCNoLeader
} }
} }
@ -240,16 +222,16 @@ func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) {
return err return err
} }
func (sws *serverWatchStream) isWatchPermitted(wcr *pb.WatchCreateRequest) error { func (sws *serverWatchStream) isWatchPermitted(wcr *pb.WatchCreateRequest) bool {
authInfo, err := sws.ag.AuthInfoFromCtx(sws.gRPCStream.Context()) authInfo, err := sws.ag.AuthInfoFromCtx(sws.gRPCStream.Context())
if err != nil { if err != nil {
return err return false
} }
if authInfo == nil { if authInfo == nil {
// if auth is enabled, IsRangePermitted() can cause an error // if auth is enabled, IsRangePermitted() can cause an error
authInfo = &auth.AuthInfo{} authInfo = &auth.AuthInfo{}
} }
return sws.ag.AuthStore().IsRangePermitted(authInfo, wcr.Key, wcr.RangeEnd) return sws.ag.AuthStore().IsRangePermitted(authInfo, wcr.Key, wcr.RangeEnd) == nil
} }
func (sws *serverWatchStream) recvLoop() error { func (sws *serverWatchStream) recvLoop() error {
@ -283,29 +265,13 @@ func (sws *serverWatchStream) recvLoop() error {
creq.RangeEnd = []byte{} creq.RangeEnd = []byte{}
} }
err := sws.isWatchPermitted(creq) if !sws.isWatchPermitted(creq) {
if err != nil {
var cancelReason string
switch err {
case auth.ErrInvalidAuthToken:
cancelReason = rpctypes.ErrGRPCInvalidAuthToken.Error()
case auth.ErrAuthOldRevision:
cancelReason = rpctypes.ErrGRPCAuthOldRevision.Error()
case auth.ErrUserEmpty:
cancelReason = rpctypes.ErrGRPCUserEmpty.Error()
default:
if err != auth.ErrPermissionDenied {
sws.lg.Error("unexpected error code", zap.Error(err))
}
cancelReason = rpctypes.ErrGRPCPermissionDenied.Error()
}
wr := &pb.WatchResponse{ wr := &pb.WatchResponse{
Header: sws.newResponseHeader(sws.watchStream.Rev()), Header: sws.newResponseHeader(sws.watchStream.Rev()),
WatchId: clientv3.InvalidWatchID, WatchId: creq.WatchId,
Canceled: true, Canceled: true,
Created: true, Created: true,
CancelReason: cancelReason, CancelReason: rpctypes.ErrGRPCPermissionDenied.Error(),
} }
select { select {
@ -336,10 +302,7 @@ func (sws *serverWatchStream) recvLoop() error {
sws.fragment[id] = true sws.fragment[id] = true
} }
sws.mu.Unlock() sws.mu.Unlock()
} else {
id = clientv3.InvalidWatchID
} }
wr := &pb.WatchResponse{ wr := &pb.WatchResponse{
Header: sws.newResponseHeader(wsrev), Header: sws.newResponseHeader(wsrev),
WatchId: int64(id), WatchId: int64(id),
@ -374,17 +337,11 @@ func (sws *serverWatchStream) recvLoop() error {
} }
case *pb.WatchRequest_ProgressRequest: case *pb.WatchRequest_ProgressRequest:
if uv.ProgressRequest != nil { if uv.ProgressRequest != nil {
sws.mu.Lock() sws.ctrlStream <- &pb.WatchResponse{
// Ignore if deferred progress notification is already in progress Header: sws.newResponseHeader(sws.watchStream.Rev()),
if !sws.deferredProgress { WatchId: -1, // response is not associated with any WatchId and will be broadcast to all watch channels
// Request progress for all watchers,
// force generation of a response
if !sws.watchStream.RequestProgressAll() {
sws.deferredProgress = true
} }
} }
sws.mu.Unlock()
}
default: default:
// we probably should not shutdown the entire stream when // we probably should not shutdown the entire stream when
// receive an valid command. // receive an valid command.
@ -433,7 +390,7 @@ func (sws *serverWatchStream) sendLoop() {
sws.mu.RUnlock() sws.mu.RUnlock()
for i := range evs { for i := range evs {
events[i] = &evs[i] events[i] = &evs[i]
if needPrevKV && !isCreateEvent(evs[i]) { if needPrevKV {
opt := mvcc.RangeOptions{Rev: evs[i].Kv.ModRevision - 1} opt := mvcc.RangeOptions{Rev: evs[i].Kv.ModRevision - 1}
r, err := sws.watchable.Range(evs[i].Kv.Key, nil, opt) r, err := sws.watchable.Range(evs[i].Kv.Key, nil, opt)
if err == nil && len(r.KVs) != 0 { if err == nil && len(r.KVs) != 0 {
@ -451,16 +408,12 @@ func (sws *serverWatchStream) sendLoop() {
Canceled: canceled, Canceled: canceled,
} }
// Progress notifications can have WatchID -1
// if they announce on behalf of multiple watchers
if wresp.WatchID != clientv3.InvalidWatchID {
if _, okID := ids[wresp.WatchID]; !okID { if _, okID := ids[wresp.WatchID]; !okID {
// buffer if id not yet announced // buffer if id not yet announced
wrs := append(pending[wresp.WatchID], wr) wrs := append(pending[wresp.WatchID], wr)
pending[wresp.WatchID] = wrs pending[wresp.WatchID] = wrs
continue continue
} }
}
mvcc.ReportEventReceived(len(evs)) mvcc.ReportEventReceived(len(evs))
@ -498,11 +451,6 @@ func (sws *serverWatchStream) sendLoop() {
// elide next progress update if sent a key update // elide next progress update if sent a key update
sws.progress[wresp.WatchID] = false sws.progress[wresp.WatchID] = false
} }
if sws.deferredProgress {
if sws.watchStream.RequestProgressAll() {
sws.deferredProgress = false
}
}
sws.mu.Unlock() sws.mu.Unlock()
case c, ok := <-sws.ctrlStream: case c, ok := <-sws.ctrlStream:
@ -530,12 +478,7 @@ func (sws *serverWatchStream) sendLoop() {
// track id creation // track id creation
wid := mvcc.WatchID(c.WatchId) wid := mvcc.WatchID(c.WatchId)
if c.Canceled {
if !(!(c.Canceled && c.Created) || wid == clientv3.InvalidWatchID) {
panic(fmt.Sprintf("unexpected watchId: %d, wanted: %d, since both 'Canceled' and 'Created' are true", wid, clientv3.InvalidWatchID))
}
if c.Canceled && wid != clientv3.InvalidWatchID {
delete(ids, wid) delete(ids, wid)
continue continue
} }
@ -581,10 +524,6 @@ func (sws *serverWatchStream) sendLoop() {
} }
} }
func isCreateEvent(e mvccpb.Event) bool {
return e.Type == mvccpb.PUT && e.Kv.CreateRevision == e.Kv.ModRevision
}
func sendFragments( func sendFragments(
wr *pb.WatchResponse, wr *pb.WatchResponse,
maxRequestBytes int, maxRequestBytes int,

View File

@ -33,6 +33,10 @@ import (
"go.uber.org/zap" "go.uber.org/zap"
) )
const (
warnApplyDuration = 100 * time.Millisecond
)
type applyResult struct { type applyResult struct {
resp proto.Message resp proto.Message
err error err error
@ -111,7 +115,7 @@ func (s *EtcdServer) newApplierV3() applierV3 {
func (a *applierV3backend) Apply(r *pb.InternalRaftRequest) *applyResult { func (a *applierV3backend) Apply(r *pb.InternalRaftRequest) *applyResult {
ar := &applyResult{} ar := &applyResult{}
defer func(start time.Time) { defer func(start time.Time) {
warnOfExpensiveRequest(a.s.getLogger(), a.s.Cfg.WarningApplyDuration, start, &pb.InternalRaftStringer{Request: r}, ar.resp, ar.err) warnOfExpensiveRequest(a.s.getLogger(), start, &pb.InternalRaftStringer{Request: r}, ar.resp, ar.err)
if ar.err != nil { if ar.err != nil {
warnOfFailedRequest(a.s.getLogger(), start, &pb.InternalRaftStringer{Request: r}, ar.resp, ar.err) warnOfFailedRequest(a.s.getLogger(), start, &pb.InternalRaftStringer{Request: r}, ar.resp, ar.err)
} }
@ -181,7 +185,7 @@ func (a *applierV3backend) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (resp *pb.Pu
trace = traceutil.New("put", trace = traceutil.New("put",
a.s.getLogger(), a.s.getLogger(),
traceutil.Field{Key: "key", Value: string(p.Key)}, traceutil.Field{Key: "key", Value: string(p.Key)},
traceutil.Field{Key: "req_size", Value: p.Size()}, traceutil.Field{Key: "req_size", Value: proto.Size(p)},
) )
val, leaseID := p.Value, lease.LeaseID(p.Lease) val, leaseID := p.Value, lease.LeaseID(p.Lease)
if txn == nil { if txn == nil {

View File

@ -176,26 +176,15 @@ func (aa *authApplierV3) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevoke
} }
func (aa *authApplierV3) checkLeasePuts(leaseID lease.LeaseID) error { func (aa *authApplierV3) checkLeasePuts(leaseID lease.LeaseID) error {
l := aa.lessor.Lookup(leaseID) lease := aa.lessor.Lookup(leaseID)
if l != nil { if lease != nil {
return aa.checkLeasePutsKeys(l) for _, key := range lease.Keys() {
}
return nil
}
func (aa *authApplierV3) checkLeasePutsKeys(l *lease.Lease) error {
// early return for most-common scenario of either disabled auth or admin user.
// IsAdminPermitted also checks whether auth is enabled
if err := aa.as.IsAdminPermitted(&aa.authInfo); err == nil {
return nil
}
for _, key := range l.Keys() {
if err := aa.as.IsPutPermitted(&aa.authInfo, []byte(key)); err != nil { if err := aa.as.IsPutPermitted(&aa.authInfo, []byte(key)); err != nil {
return err return err
} }
} }
}
return nil return nil
} }

View File

@ -1,115 +0,0 @@
// Copyright 2023 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package etcdserver
import (
"testing"
"time"
"go.etcd.io/etcd/auth"
"go.etcd.io/etcd/auth/authpb"
pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
"go.etcd.io/etcd/lease"
"golang.org/x/crypto/bcrypt"
betesting "go.etcd.io/etcd/mvcc/backend"
"github.com/stretchr/testify/assert"
"go.uber.org/zap/zaptest"
)
// TestCheckLeasePutsKeys verifies that authApplierV3.checkLeasePutsKeys enforces
// write permission on every key attached to a lease:
//   - with auth disabled, any user may attach keys;
//   - with auth enabled, root is always allowed;
//   - a non-existing user, a stale auth revision, and an existing user without
//     the required permission are rejected with the matching auth error;
//   - once the user is granted a role with READWRITE permission on the key,
//     the check passes.
func TestCheckLeasePutsKeys(t *testing.T) {
	lg := zaptest.NewLogger(t)
	b, _ := betesting.NewDefaultTmpBackend()
	defer b.Close()
	simpleTokenTTLDefault := 300 * time.Second
	tokenTypeSimple := "simple"
	// dummyIndexWaiter signals readiness immediately so the simple token
	// provider never blocks waiting for an applied index in this unit test.
	dummyIndexWaiter := func(index uint64) <-chan struct{} {
		ch := make(chan struct{}, 1)
		go func() {
			ch <- struct{}{}
		}()
		return ch
	}
	// Reuse lg and fail fast if the token provider cannot be constructed,
	// instead of discarding the error and crashing later on a nil provider.
	tp, err := auth.NewTokenProvider(lg, tokenTypeSimple, dummyIndexWaiter, simpleTokenTTLDefault)
	assert.NoError(t, err, "failed to create the token provider")
	as := auth.NewAuthStore(lg, b, tp, bcrypt.MinCost)
	aa := authApplierV3{as: as}
	assert.NoError(t, aa.checkLeasePutsKeys(lease.NewLease(lease.LeaseID(1), 3600)), "auth is disabled, should allow puts")
	assert.NoError(t, enableAuthAndCreateRoot(aa.as), "error while enabling auth")
	aa.authInfo = auth.AuthInfo{Username: "root"}
	assert.NoError(t, aa.checkLeasePutsKeys(lease.NewLease(lease.LeaseID(1), 3600)), "auth is enabled, should allow puts for root")
	// Attach key "a" to the lease; every subsequent check must validate
	// the caller's write permission on that key.
	l := lease.NewLease(lease.LeaseID(1), 3600)
	l.SetLeaseItem(lease.LeaseItem{Key: "a"})
	aa.authInfo = auth.AuthInfo{Username: "bob", Revision: 0}
	assert.ErrorIs(t, aa.checkLeasePutsKeys(l), auth.ErrUserEmpty, "auth is enabled, should not allow bob, non existing at rev 0")
	aa.authInfo = auth.AuthInfo{Username: "bob", Revision: 1}
	assert.ErrorIs(t, aa.checkLeasePutsKeys(l), auth.ErrAuthOldRevision, "auth is enabled, old revision")
	aa.authInfo = auth.AuthInfo{Username: "bob", Revision: aa.as.Revision()}
	assert.ErrorIs(t, aa.checkLeasePutsKeys(l), auth.ErrPermissionDenied, "auth is enabled, bob does not have permissions, bob does not exist")
	_, err = aa.as.UserAdd(&pb.AuthUserAddRequest{Name: "bob", Options: &authpb.UserAddOptions{NoPassword: true}})
	assert.NoError(t, err, "bob should be added without error")
	aa.authInfo = auth.AuthInfo{Username: "bob", Revision: aa.as.Revision()}
	assert.ErrorIs(t, aa.checkLeasePutsKeys(l), auth.ErrPermissionDenied, "auth is enabled, bob exists yet does not have permissions")
	// allow bob to access "a"
	_, err = aa.as.RoleAdd(&pb.AuthRoleAddRequest{Name: "bobsrole"})
	assert.NoError(t, err, "bobsrole should be added without error")
	_, err = aa.as.RoleGrantPermission(&pb.AuthRoleGrantPermissionRequest{
		Name: "bobsrole",
		Perm: &authpb.Permission{
			PermType: authpb.READWRITE,
			Key:      []byte("a"),
			RangeEnd: nil,
		},
	})
	assert.NoError(t, err, "bobsrole should be granted permissions without error")
	_, err = aa.as.UserGrantRole(&pb.AuthUserGrantRoleRequest{
		User: "bob",
		Role: "bobsrole",
	})
	assert.NoError(t, err, "bob should be granted bobsrole without error")
	aa.authInfo = auth.AuthInfo{Username: "bob", Revision: aa.as.Revision()}
	assert.NoError(t, aa.checkLeasePutsKeys(l), "bob should be able to access key 'a'")
}
// enableAuthAndCreateRoot bootstraps authentication on the given store: it
// creates a password-protected "root" user, creates the "root" role, grants
// that role to the user, and finally enables auth. The first failing step
// aborts the sequence and its error is returned.
func enableAuthAndCreateRoot(as auth.AuthStore) error {
	if _, err := as.UserAdd(&pb.AuthUserAddRequest{
		Name:     "root",
		Password: "root",
		Options:  &authpb.UserAddOptions{NoPassword: false},
	}); err != nil {
		return err
	}
	if _, err := as.RoleAdd(&pb.AuthRoleAddRequest{Name: "root"}); err != nil {
		return err
	}
	if _, err := as.UserGrantRole(&pb.AuthUserGrantRoleRequest{User: "root", Role: "root"}); err != nil {
		return err
	}
	return as.AuthEnable()
}

View File

@ -119,7 +119,7 @@ func (s *EtcdServer) applyV2Request(r *RequestV2) Response {
stringer: r, stringer: r,
alternative: func() string { return fmt.Sprintf("id:%d,method:%s,path:%s", r.ID, r.Method, r.Path) }, alternative: func() string { return fmt.Sprintf("id:%d,method:%s,path:%s", r.ID, r.Method, r.Path) },
} }
defer warnOfExpensiveRequest(s.getLogger(), s.Cfg.WarningApplyDuration, time.Now(), stringer, nil, nil) defer warnOfExpensiveRequest(s.getLogger(), time.Now(), stringer, nil, nil)
switch r.Method { switch r.Method {
case "POST": case "POST":

View File

@ -119,12 +119,6 @@ type ServerConfig struct {
// MaxRequestBytes is the maximum request size to send over raft. // MaxRequestBytes is the maximum request size to send over raft.
MaxRequestBytes uint MaxRequestBytes uint
// MaxConcurrentStreams specifies the maximum number of concurrent
// streams that each client can open at a time.
MaxConcurrentStreams uint32
WarningApplyDuration time.Duration
StrictReconfigCheck bool StrictReconfigCheck bool
// ClientCertAuthEnabled is true when cert has been signed by the client CA. // ClientCertAuthEnabled is true when cert has been signed by the client CA.
@ -158,12 +152,10 @@ type ServerConfig struct {
ForceNewCluster bool ForceNewCluster bool
// EnableLeaseCheckpoint enables leader to send regular checkpoints to other members to prevent reset of remaining TTL on leader change. // EnableLeaseCheckpoint enables primary lessor to persist lease remainingTTL to prevent indefinite auto-renewal of long lived leases.
EnableLeaseCheckpoint bool EnableLeaseCheckpoint bool
// LeaseCheckpointInterval time.Duration is the wait duration between lease checkpoints. // LeaseCheckpointInterval time.Duration is the wait duration between lease checkpoints.
LeaseCheckpointInterval time.Duration LeaseCheckpointInterval time.Duration
// LeaseCheckpointPersist enables persisting remainingTTL to prevent indefinite auto-renewal of long lived leases. Always enabled in v3.6. Should be used to ensure smooth upgrade from v3.5 clusters with this feature enabled.
LeaseCheckpointPersist bool
EnableGRPCGateway bool EnableGRPCGateway bool

View File

@ -202,17 +202,13 @@ func (s *EtcdServer) checkHashKV() error {
} }
alarmed := false alarmed := false
mismatch := func(id types.ID) { mismatch := func(id uint64) {
if alarmed { if alarmed {
return return
} }
alarmed = true alarmed = true
// It isn't clear which member's data is corrupted, so we
// intentionally set the memberID as 0. We will identify
// the corrupted members using quorum in 3.6. Please see
// discussion in https://github.com/etcd-io/etcd/pull/14828.
a := &pb.AlarmRequest{ a := &pb.AlarmRequest{
MemberID: 0, MemberID: id,
Action: pb.AlarmRequest_ACTIVATE, Action: pb.AlarmRequest_ACTIVATE,
Alarm: pb.AlarmType_CORRUPT, Alarm: pb.AlarmType_CORRUPT,
} }
@ -235,7 +231,7 @@ func (s *EtcdServer) checkHashKV() error {
} else { } else {
plog.Warningf("mismatched hashes %d and %d for revision %d", h, h2, rev) plog.Warningf("mismatched hashes %d and %d for revision %d", h, h2, rev)
} }
mismatch(s.ID()) mismatch(uint64(s.ID()))
} }
checkedCount := 0 checkedCount := 0
@ -244,6 +240,7 @@ func (s *EtcdServer) checkHashKV() error {
continue continue
} }
checkedCount++ checkedCount++
id := p.resp.Header.MemberId
// leader expects follower's latest revision less than or equal to leader's // leader expects follower's latest revision less than or equal to leader's
if p.resp.Header.Revision > rev2 { if p.resp.Header.Revision > rev2 {
@ -252,16 +249,16 @@ func (s *EtcdServer) checkHashKV() error {
"revision from follower must be less than or equal to leader's", "revision from follower must be less than or equal to leader's",
zap.Int64("leader-revision", rev2), zap.Int64("leader-revision", rev2),
zap.Int64("follower-revision", p.resp.Header.Revision), zap.Int64("follower-revision", p.resp.Header.Revision),
zap.String("follower-peer-id", p.id.String()), zap.String("follower-peer-id", types.ID(id).String()),
) )
} else { } else {
plog.Warningf( plog.Warningf(
"revision %d from member %v, expected at most %d", "revision %d from member %v, expected at most %d",
p.resp.Header.Revision, p.resp.Header.Revision,
p.id, types.ID(id),
rev2) rev2)
} }
mismatch(p.id) mismatch(id)
} }
// leader expects follower's latest compact revision less than or equal to leader's // leader expects follower's latest compact revision less than or equal to leader's
@ -271,17 +268,17 @@ func (s *EtcdServer) checkHashKV() error {
"compact revision from follower must be less than or equal to leader's", "compact revision from follower must be less than or equal to leader's",
zap.Int64("leader-compact-revision", crev2), zap.Int64("leader-compact-revision", crev2),
zap.Int64("follower-compact-revision", p.resp.CompactRevision), zap.Int64("follower-compact-revision", p.resp.CompactRevision),
zap.String("follower-peer-id", p.id.String()), zap.String("follower-peer-id", types.ID(id).String()),
) )
} else { } else {
plog.Warningf( plog.Warningf(
"compact revision %d from member %v, expected at most %d", "compact revision %d from member %v, expected at most %d",
p.resp.CompactRevision, p.resp.CompactRevision,
p.id, types.ID(id),
crev2, crev2,
) )
} }
mismatch(p.id) mismatch(id)
} }
// follower's compact revision is leader's old one, then hashes must match // follower's compact revision is leader's old one, then hashes must match
@ -293,18 +290,18 @@ func (s *EtcdServer) checkHashKV() error {
zap.Uint32("leader-hash", h), zap.Uint32("leader-hash", h),
zap.Int64("follower-compact-revision", p.resp.CompactRevision), zap.Int64("follower-compact-revision", p.resp.CompactRevision),
zap.Uint32("follower-hash", p.resp.Hash), zap.Uint32("follower-hash", p.resp.Hash),
zap.String("follower-peer-id", p.id.String()), zap.String("follower-peer-id", types.ID(id).String()),
) )
} else { } else {
plog.Warningf( plog.Warningf(
"hash %d at revision %d from member %v, expected hash %d", "hash %d at revision %d from member %v, expected hash %d",
p.resp.Hash, p.resp.Hash,
rev, rev,
p.id, types.ID(id),
h, h,
) )
} }
mismatch(p.id) mismatch(id)
} }
} }
if lg != nil { if lg != nil {

View File

@ -26,7 +26,6 @@ var (
ErrTimeout = errors.New("etcdserver: request timed out") ErrTimeout = errors.New("etcdserver: request timed out")
ErrTimeoutDueToLeaderFail = errors.New("etcdserver: request timed out, possibly due to previous leader failure") ErrTimeoutDueToLeaderFail = errors.New("etcdserver: request timed out, possibly due to previous leader failure")
ErrTimeoutDueToConnectionLost = errors.New("etcdserver: request timed out, possibly due to connection lost") ErrTimeoutDueToConnectionLost = errors.New("etcdserver: request timed out, possibly due to connection lost")
ErrTimeoutWaitAppliedIndex = errors.New("etcdserver: request timed out, waiting for the applied index took too long")
ErrTimeoutLeaderTransfer = errors.New("etcdserver: request timed out, leader transfer took too long") ErrTimeoutLeaderTransfer = errors.New("etcdserver: request timed out, leader transfer took too long")
ErrLeaderChanged = errors.New("etcdserver: leader changed") ErrLeaderChanged = errors.New("etcdserver: leader changed")
ErrNotEnoughStartedMembers = errors.New("etcdserver: re-configuration failed due to not enough started members") ErrNotEnoughStartedMembers = errors.New("etcdserver: re-configuration failed due to not enough started members")

View File

@ -5,13 +5,11 @@
Package etcdserverpb is a generated protocol buffer package. Package etcdserverpb is a generated protocol buffer package.
It is generated from these files: It is generated from these files:
etcdserver.proto etcdserver.proto
raft_internal.proto raft_internal.proto
rpc.proto rpc.proto
It has these top-level messages: It has these top-level messages:
Request Request
Metadata Metadata
RequestHeader RequestHeader

View File

@ -135,14 +135,8 @@ func NewBackendQuota(s *EtcdServer, name string) Quota {
} }
func (b *backendQuota) Available(v interface{}) bool { func (b *backendQuota) Available(v interface{}) bool {
cost := b.Cost(v)
// if there are no mutating requests, it's safe to pass through
if cost == 0 {
return true
}
// TODO: maybe optimize backend.Size() // TODO: maybe optimize backend.Size()
return b.s.Backend().Size()+int64(cost) < b.maxBackendBytes return b.s.Backend().Size()+int64(b.Cost(v)) < b.maxBackendBytes
} }
func (b *backendQuota) Cost(v interface{}) int { func (b *backendQuota) Cost(v interface{}) int {

View File

@ -215,18 +215,6 @@ func (r *raftNode) start(rh *raftReadyHandler) {
notifyc: notifyc, notifyc: notifyc,
} }
waitWALSync := shouldWaitWALSync(rd)
if waitWALSync {
// gofail: var raftBeforeSaveWaitWalSync struct{}
if err := r.storage.Save(rd.HardState, rd.Entries); err != nil {
if r.lg != nil {
r.lg.Fatal("failed to save Raft hard state and entries", zap.Error(err))
} else {
plog.Fatalf("failed to save state and entries error: %v", err)
}
}
}
updateCommittedIndex(&ap, rh) updateCommittedIndex(&ap, rh)
select { select {
@ -257,7 +245,6 @@ func (r *raftNode) start(rh *raftReadyHandler) {
// gofail: var raftAfterSaveSnap struct{} // gofail: var raftAfterSaveSnap struct{}
} }
if !waitWALSync {
// gofail: var raftBeforeSave struct{} // gofail: var raftBeforeSave struct{}
if err := r.storage.Save(rd.HardState, rd.Entries); err != nil { if err := r.storage.Save(rd.HardState, rd.Entries); err != nil {
if r.lg != nil { if r.lg != nil {
@ -266,7 +253,6 @@ func (r *raftNode) start(rh *raftReadyHandler) {
plog.Fatalf("failed to save state and entries error: %v", err) plog.Fatalf("failed to save state and entries error: %v", err)
} }
} }
}
if !raft.IsEmptyHardState(rd.HardState) { if !raft.IsEmptyHardState(rd.HardState) {
proposalsCommitted.Set(float64(rd.HardState.Commit)) proposalsCommitted.Set(float64(rd.HardState.Commit))
} }
@ -356,43 +342,6 @@ func (r *raftNode) start(rh *raftReadyHandler) {
}() }()
} }
// For a cluster with only one member, the raft may send both the
// unstable entries and committed entries to etcdserver, and there
// may have overlapped log entries between them.
//
// etcd responds to the client once it finishes (actually partially)
// the applying workflow. But when the client receives the response,
// it doesn't mean etcd has already successfully saved the data,
// including BoltDB and WAL, because:
// 1. etcd commits the boltDB transaction periodically instead of on each request;
// 2. etcd saves WAL entries in parallel with applying the committed entries.
//
// Accordingly, it might run into a situation of data loss when the etcd crashes
// immediately after responding to the client and before the boltDB and WAL
// successfully save the data to disk.
// Note that this issue can only happen for clusters with only one member.
//
// For clusters with multiple members, it isn't an issue, because etcd will
// not commit & apply the data before it being replicated to majority members.
// When the client receives the response, it means the data must have been applied.
// It further means the data must have been committed.
// Note: for clusters with multiple members, the raft will never send identical
// unstable entries and committed entries to etcdserver.
//
// Refer to https://github.com/etcd-io/etcd/issues/14370.
func shouldWaitWALSync(rd raft.Ready) bool {
if len(rd.CommittedEntries) == 0 || len(rd.Entries) == 0 {
return false
}
// Check if there is overlap between unstable and committed entries
// assuming that their index and term are only incrementing.
lastCommittedEntry := rd.CommittedEntries[len(rd.CommittedEntries)-1]
firstUnstableEntry := rd.Entries[0]
return lastCommittedEntry.Term > firstUnstableEntry.Term ||
(lastCommittedEntry.Term == firstUnstableEntry.Term && lastCommittedEntry.Index >= firstUnstableEntry.Index)
}
func updateCommittedIndex(ap *apply, rh *raftReadyHandler) { func updateCommittedIndex(ap *apply, rh *raftReadyHandler) {
var ci uint64 var ci uint64
if len(ap.entries) != 0 { if len(ap.entries) != 0 {

View File

@ -21,7 +21,6 @@ import (
"testing" "testing"
"time" "time"
"github.com/stretchr/testify/assert"
"go.etcd.io/etcd/etcdserver/api/membership" "go.etcd.io/etcd/etcdserver/api/membership"
"go.etcd.io/etcd/pkg/mock/mockstorage" "go.etcd.io/etcd/pkg/mock/mockstorage"
"go.etcd.io/etcd/pkg/pbutil" "go.etcd.io/etcd/pkg/pbutil"
@ -268,79 +267,3 @@ func TestProcessDuplicatedAppRespMessage(t *testing.T) {
t.Errorf("count = %d, want %d", got, want) t.Errorf("count = %d, want %d", got, want)
} }
} }
func TestShouldWaitWALSync(t *testing.T) {
testcases := []struct {
name string
unstableEntries []raftpb.Entry
commitedEntries []raftpb.Entry
expectedResult bool
}{
{
name: "both entries are nil",
unstableEntries: nil,
commitedEntries: nil,
expectedResult: false,
},
{
name: "both entries are empty slices",
unstableEntries: []raftpb.Entry{},
commitedEntries: []raftpb.Entry{},
expectedResult: false,
},
{
name: "one nil and the other empty",
unstableEntries: nil,
commitedEntries: []raftpb.Entry{},
expectedResult: false,
},
{
name: "one nil and the other has data",
unstableEntries: nil,
commitedEntries: []raftpb.Entry{{Term: 4, Index: 10, Type: raftpb.EntryNormal, Data: []byte{0x11, 0x22, 0x33}}},
expectedResult: false,
},
{
name: "one empty and the other has data",
unstableEntries: []raftpb.Entry{},
commitedEntries: []raftpb.Entry{{Term: 4, Index: 10, Type: raftpb.EntryNormal, Data: []byte{0x11, 0x22, 0x33}}},
expectedResult: false,
},
{
name: "has different term and index",
unstableEntries: []raftpb.Entry{{Term: 5, Index: 11, Type: raftpb.EntryNormal, Data: []byte{0x11, 0x22, 0x33}}},
commitedEntries: []raftpb.Entry{{Term: 4, Index: 10, Type: raftpb.EntryNormal, Data: []byte{0x11, 0x22, 0x33}}},
expectedResult: false,
},
{
name: "has identical data",
unstableEntries: []raftpb.Entry{{Term: 4, Index: 10, Type: raftpb.EntryNormal, Data: []byte{0x11, 0x22, 0x33}}},
commitedEntries: []raftpb.Entry{{Term: 4, Index: 10, Type: raftpb.EntryNormal, Data: []byte{0x11, 0x22, 0x33}}},
expectedResult: true,
},
{
name: "has overlapped entry",
unstableEntries: []raftpb.Entry{
{Term: 4, Index: 10, Type: raftpb.EntryNormal, Data: []byte{0x11, 0x22, 0x33}},
{Term: 4, Index: 11, Type: raftpb.EntryNormal, Data: []byte{0x44, 0x55, 0x66}},
{Term: 4, Index: 12, Type: raftpb.EntryNormal, Data: []byte{0x77, 0x88, 0x99}},
},
commitedEntries: []raftpb.Entry{
{Term: 4, Index: 8, Type: raftpb.EntryNormal, Data: []byte{0x07, 0x08, 0x09}},
{Term: 4, Index: 9, Type: raftpb.EntryNormal, Data: []byte{0x10, 0x11, 0x12}},
{Term: 4, Index: 10, Type: raftpb.EntryNormal, Data: []byte{0x11, 0x22, 0x33}},
},
expectedResult: true,
},
}
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
shouldWALSync := shouldWaitWALSync(raft.Ready{
Entries: tc.unstableEntries,
CommittedEntries: tc.commitedEntries,
})
assert.Equal(t, tc.expectedResult, shouldWALSync)
})
}
}

View File

@ -25,7 +25,6 @@ import (
"os" "os"
"path" "path"
"regexp" "regexp"
"strings"
"sync" "sync"
"sync/atomic" "sync/atomic"
"time" "time"
@ -259,6 +258,10 @@ type EtcdServer struct {
peerRt http.RoundTripper peerRt http.RoundTripper
reqIDGen *idutil.Generator reqIDGen *idutil.Generator
// forceVersionC is used to force the version monitor loop
// to detect the cluster version immediately.
forceVersionC chan struct{}
// wgMu blocks concurrent waitgroup mutation while server stopping // wgMu blocks concurrent waitgroup mutation while server stopping
wgMu sync.RWMutex wgMu sync.RWMutex
// wg is used to wait for the go routines that depends on the server state // wg is used to wait for the go routines that depends on the server state
@ -273,9 +276,6 @@ type EtcdServer struct {
leadTimeMu sync.RWMutex leadTimeMu sync.RWMutex
leadElectedTime time.Time leadElectedTime time.Time
firstCommitInTermMu sync.RWMutex
firstCommitInTermC chan struct{}
*AccessController *AccessController
} }
@ -323,17 +323,6 @@ func NewServer(cfg ServerConfig) (srv *EtcdServer, err error) {
plog.Fatalf("create snapshot directory error: %v", err) plog.Fatalf("create snapshot directory error: %v", err)
} }
} }
if err = fileutil.RemoveMatchFile(cfg.Logger, cfg.SnapDir(), func(fileName string) bool {
return strings.HasPrefix(fileName, "tmp")
}); err != nil {
cfg.Logger.Error(
"failed to remove temp file(s) in snapshot directory",
zap.String("path", cfg.SnapDir()),
zap.Error(err),
)
}
ss := snap.New(cfg.Logger, cfg.SnapDir()) ss := snap.New(cfg.Logger, cfg.SnapDir())
bepath := cfg.backendPath() bepath := cfg.backendPath()
@ -539,8 +528,8 @@ func NewServer(cfg ServerConfig) (srv *EtcdServer, err error) {
SyncTicker: time.NewTicker(500 * time.Millisecond), SyncTicker: time.NewTicker(500 * time.Millisecond),
peerRt: prt, peerRt: prt,
reqIDGen: idutil.NewGenerator(uint16(id), time.Now()), reqIDGen: idutil.NewGenerator(uint16(id), time.Now()),
forceVersionC: make(chan struct{}),
AccessController: &AccessController{CORS: cfg.CORS, HostWhitelist: cfg.HostWhitelist}, AccessController: &AccessController{CORS: cfg.CORS, HostWhitelist: cfg.HostWhitelist},
firstCommitInTermC: make(chan struct{}),
} }
serverID.With(prometheus.Labels{"server_id": id.String()}).Set(1) serverID.With(prometheus.Labels{"server_id": id.String()}).Set(1)
@ -554,11 +543,9 @@ func NewServer(cfg ServerConfig) (srv *EtcdServer, err error) {
srv.lessor = lease.NewLessor( srv.lessor = lease.NewLessor(
srv.getLogger(), srv.getLogger(),
srv.be, srv.be,
srv.cluster,
lease.LessorConfig{ lease.LessorConfig{
MinLeaseTTL: int64(math.Ceil(minTTL.Seconds())), MinLeaseTTL: int64(math.Ceil(minTTL.Seconds())),
CheckpointInterval: cfg.LeaseCheckpointInterval, CheckpointInterval: cfg.LeaseCheckpointInterval,
CheckpointPersist: cfg.LeaseCheckpointPersist,
ExpiredLeasesRetryInterval: srv.Cfg.ReqTimeout(), ExpiredLeasesRetryInterval: srv.Cfg.ReqTimeout(),
}) })
@ -1072,42 +1059,19 @@ func (s *EtcdServer) run() {
f := func(context.Context) { s.applyAll(&ep, &ap) } f := func(context.Context) { s.applyAll(&ep, &ap) }
sched.Schedule(f) sched.Schedule(f)
case leases := <-expiredLeaseC: case leases := <-expiredLeaseC:
s.revokeExpiredLeases(leases)
case err := <-s.errorc:
if lg != nil {
lg.Warn("server error", zap.Error(err))
lg.Warn("data-dir used by this member must be removed")
} else {
plog.Errorf("%s", err)
plog.Infof("the data-dir used by this member must be removed.")
}
return
case <-getSyncC():
if s.v2store.HasTTLKeys() {
s.sync(s.Cfg.ReqTimeout())
}
case <-s.stop:
return
}
}
}
func (s *EtcdServer) revokeExpiredLeases(leases []*lease.Lease) {
s.goAttach(func() { s.goAttach(func() {
lg := s.Logger()
// Increases throughput of expired leases deletion process through parallelization // Increases throughput of expired leases deletion process through parallelization
c := make(chan struct{}, maxPendingRevokes) c := make(chan struct{}, maxPendingRevokes)
for _, curLease := range leases { for _, lease := range leases {
select { select {
case c <- struct{}{}: case c <- struct{}{}:
case <-s.stopping: case <-s.stopping:
return return
} }
lid := lease.ID
f := func(lid int64) {
s.goAttach(func() { s.goAttach(func() {
ctx := s.authStore.WithRoot(s.ctx) ctx := s.authStore.WithRoot(s.ctx)
_, lerr := s.LeaseRevoke(ctx, &pb.LeaseRevokeRequest{ID: lid}) _, lerr := s.LeaseRevoke(ctx, &pb.LeaseRevokeRequest{ID: int64(lid)})
if lerr == nil { if lerr == nil {
leaseExpired.Inc() leaseExpired.Inc()
} else { } else {
@ -1125,10 +1089,24 @@ func (s *EtcdServer) revokeExpiredLeases(leases []*lease.Lease) {
<-c <-c
}) })
} }
f(int64(curLease.ID))
}
}) })
case err := <-s.errorc:
if lg != nil {
lg.Warn("server error", zap.Error(err))
lg.Warn("data-dir used by this member must be removed")
} else {
plog.Errorf("%s", err)
plog.Infof("the data-dir used by this member must be removed.")
}
return
case <-getSyncC():
if s.v2store.HasTTLKeys() {
s.sync(s.Cfg.ReqTimeout())
}
case <-s.stop:
return
}
}
} }
func (s *EtcdServer) applyAll(ep *etcdProgress, apply *apply) { func (s *EtcdServer) applyAll(ep *etcdProgress, apply *apply) {
@ -1798,10 +1776,6 @@ func (s *EtcdServer) mayPromoteMember(id types.ID) error {
// Note: it will return nil if member is not found in cluster or if member is not learner. // Note: it will return nil if member is not found in cluster or if member is not learner.
// These two conditions will be checked before apply phase later. // These two conditions will be checked before apply phase later.
func (s *EtcdServer) isLearnerReady(id uint64) error { func (s *EtcdServer) isLearnerReady(id uint64) error {
if err := s.waitAppliedIndex(); err != nil {
return err
}
rs := s.raftStatus() rs := s.raftStatus()
// leader's raftStatus.Progress is not nil // leader's raftStatus.Progress is not nil
@ -1821,17 +1795,13 @@ func (s *EtcdServer) isLearnerReady(id uint64) error {
} }
} }
// We should return an error in API directly, to avoid the request if isFound {
// being unnecessarily delivered to raft.
if !isFound {
return membership.ErrIDNotFound
}
leaderMatch := rs.Progress[leaderID].Match leaderMatch := rs.Progress[leaderID].Match
// the learner's Match not caught up with leader yet // the learner's Match not caught up with leader yet
if float64(learnerMatch) < float64(leaderMatch)*readyPercent { if float64(learnerMatch) < float64(leaderMatch)*readyPercent {
return ErrLearnerNotReady return ErrLearnerNotReady
} }
}
return nil return nil
} }
@ -1942,16 +1912,6 @@ func (s *EtcdServer) leaderChangedNotify() <-chan struct{} {
return s.leaderChanged return s.leaderChanged
} }
// FirstCommitInTermNotify returns channel that will be unlocked on first
// entry committed in new term, which is necessary for new leader to answer
// read-only requests (leader is not able to respond any read-only requests
// as long as linearizable semantic is required)
func (s *EtcdServer) FirstCommitInTermNotify() <-chan struct{} {
s.firstCommitInTermMu.RLock()
defer s.firstCommitInTermMu.RUnlock()
return s.firstCommitInTermC
}
// RaftStatusGetter represents etcd server and Raft progress. // RaftStatusGetter represents etcd server and Raft progress.
type RaftStatusGetter interface { type RaftStatusGetter interface {
ID() types.ID ID() types.ID
@ -2219,8 +2179,10 @@ func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) {
// raft state machine may generate noop entry when leader confirmation. // raft state machine may generate noop entry when leader confirmation.
// skip it in advance to avoid some potential bug in the future // skip it in advance to avoid some potential bug in the future
if len(e.Data) == 0 { if len(e.Data) == 0 {
s.notifyAboutFirstCommitInTerm() select {
case s.forceVersionC <- struct{}{}:
default:
}
// promote lessor when the local member is leader and finished // promote lessor when the local member is leader and finished
// applying all entries from the last term. // applying all entries from the last term.
if s.isLeader() { if s.isLeader() {
@ -2293,15 +2255,6 @@ func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) {
}) })
} }
func (s *EtcdServer) notifyAboutFirstCommitInTerm() {
newNotifier := make(chan struct{})
s.firstCommitInTermMu.Lock()
notifierToClose := s.firstCommitInTermC
s.firstCommitInTermC = newNotifier
s.firstCommitInTermMu.Unlock()
close(notifierToClose)
}
// applyConfChange applies a ConfChange to the server. It is only // applyConfChange applies a ConfChange to the server. It is only
// invoked with a ConfChange that has already passed through Raft // invoked with a ConfChange that has already passed through Raft
func (s *EtcdServer) applyConfChange(cc raftpb.ConfChange, confState *raftpb.ConfState) (bool, error) { func (s *EtcdServer) applyConfChange(cc raftpb.ConfChange, confState *raftpb.ConfState) (bool, error) {
@ -2528,7 +2481,7 @@ func (s *EtcdServer) ClusterVersion() *semver.Version {
func (s *EtcdServer) monitorVersions() { func (s *EtcdServer) monitorVersions() {
for { for {
select { select {
case <-s.FirstCommitInTermNotify(): case <-s.forceVersionC:
case <-time.After(monitorVersionInterval): case <-time.After(monitorVersionInterval):
case <-s.stopping: case <-s.stopping:
return return

View File

@ -994,8 +994,7 @@ func TestSnapshot(t *testing.T) {
defer func() { ch <- struct{}{} }() defer func() { ch <- struct{}{} }()
if len(gaction) != 2 { if len(gaction) != 2 {
t.Errorf("len(action) = %d, want 2", len(gaction)) t.Fatalf("len(action) = %d, want 2", len(gaction))
return
} }
if !reflect.DeepEqual(gaction[0], testutil.Action{Name: "SaveSnap"}) { if !reflect.DeepEqual(gaction[0], testutil.Action{Name: "SaveSnap"}) {
t.Errorf("action = %s, want SaveSnap", gaction[0]) t.Errorf("action = %s, want SaveSnap", gaction[0])
@ -1157,8 +1156,7 @@ func TestTriggerSnap(t *testing.T) {
// (SnapshotCount+1) * Puts + SaveSnap = (SnapshotCount+1) * Save + SaveSnap + Release // (SnapshotCount+1) * Puts + SaveSnap = (SnapshotCount+1) * Save + SaveSnap + Release
if len(gaction) != wcnt { if len(gaction) != wcnt {
t.Logf("gaction: %v", gaction) t.Logf("gaction: %v", gaction)
t.Errorf("len(action) = %d, want %d", len(gaction), wcnt) t.Fatalf("len(action) = %d, want %d", len(gaction), wcnt)
return
} }
if !reflect.DeepEqual(gaction[wcnt-2], testutil.Action{Name: "SaveSnap"}) { if !reflect.DeepEqual(gaction[wcnt-2], testutil.Action{Name: "SaveSnap"}) {
@ -1850,59 +1848,3 @@ func (s *sendMsgAppRespTransporter) Send(m []raftpb.Message) {
} }
s.sendC <- send s.sendC <- send
} }
func TestWaitAppliedIndex(t *testing.T) {
cases := []struct {
name string
appliedIndex uint64
committedIndex uint64
action func(s *EtcdServer)
ExpectedError error
}{
{
name: "The applied Id is already equal to the commitId",
appliedIndex: 10,
committedIndex: 10,
action: func(s *EtcdServer) {
s.applyWait.Trigger(10)
},
ExpectedError: nil,
},
{
name: "The etcd server has already stopped",
appliedIndex: 10,
committedIndex: 12,
action: func(s *EtcdServer) {
s.stopping <- struct{}{}
},
ExpectedError: ErrStopped,
},
{
name: "Timed out waiting for the applied index",
appliedIndex: 10,
committedIndex: 12,
action: nil,
ExpectedError: ErrTimeoutWaitAppliedIndex,
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
s := &EtcdServer{
appliedIndex: tc.appliedIndex,
committedIndex: tc.committedIndex,
stopping: make(chan struct{}, 1),
applyWait: wait.NewTimeList(),
}
if tc.action != nil {
go tc.action(s)
}
err := s.waitAppliedIndex()
if err != tc.ExpectedError {
t.Errorf("Unexpected error, want (%v), got (%v)", tc.ExpectedError, err)
}
})
}
}

Some files were not shown because too many files have changed in this diff Show More