Compare commits

146 Commits

72d3e382e7
eb9cee9ee3
85abf6e46d
1eac258f58
91da298560
19e2e70e4f
8ea187e2cf
e63d058247
1558ede7f8
41061e56ad
501d8f01ea
38669a0709
7489911d51
15b7954d03
6cc1345a0b
589a6993b8
4bacd21e20
0ecc337028
628fa1818e
d19fbe541b
6bbc85827b
dbde4f2d5e
15715dcf1a
963d3b9369
ba829044f5
c4eb81af99
ceafa1b33e
5890bc8bd6
c274aa5ea4
276ee962ec
8d1b8335e3
c3f447a698
85e037d9c6
a1691be1bd
df35086b6a
eeefd614c8
4276c33026
cfc08e5f06
0b7e4184e8
35bd924596
62596faeed
b7e5f5bc12
91bed2e01f
b19eb0f339
8557cb29ba
ef415e3fe1
82eae9227c
656dc63eab
30799c97be
16fe9a89ff
c499d9b047
2702f9e5f2
94634fc258
afd6d8a40d
9aeabe447d
aa7126864d
3be9460ddc
f27ef4d343
a1c5f59b59
a40f14d92c
d51c6c689b
becc228c5a
0880605772
bea35fd2c6
8a03d2e961
a4b43b388d
e3b29b66a4
eb0fb0e799
40b71074e8
7e2d426ec0
3019246742
dd1b699fc4
f44aaf8248
ae9734ed27
781bde75e2
d5ebbbceb8
7cd5872656
46a0a44f95
17cef6e3e9
c07cba001b
b8878eac45
e71e0c5c88
bc44e367c3
299e0f17aa
75d5e78d1f
c60dabf2f3
8a4afdbcc2
6fcab5af9f
008074187c
cf558ee8b7
e800c62eca
0372cfc7ab
18dfb9cca3
7b8270416d
a2c37485dd
67bfc310f0
ed28c768a3
d3a702a09d
319331192e
2acdf88406
a8454e453f
32583af167
85cc4deae6
7dec4c412c
a4667f596a
0207d1df66
99e893d285
d5dec731db
81a2edc365
e5424fc474
4488595e05
7b99863e02
490c6139ac
31e49a4df3
83fc96df0c
45192cf62b
1a1281005c
a4f42948e8
2212a84adb
e42d7b5248
b86bb615ff
ee963470f4
36452a1c1d
4571e528f4
37ac22205b
493f15c156
c8b3c6f54c
368ff75a10
7adbfa1144
e151faf3cc
8292fd5051
c37245ed4b
6dab8aff66
c69efda350
3d8e9a323d
963b242846
6f011ce524
36f8dee003
47001f28bd
9a24f73f7b
7d1cf64049
05c441f92f
434f7e83f0
91b1a9182a
78f67988aa
3b8f812955
.github/workflows/tests.yaml (vendored, new file) — 89 additions

@@ -0,0 +1,89 @@
+name: Tests
+on: [push, pull_request]
+jobs:
+  test:
+    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        go: [1.12.17, 1.15.15]
+        target:
+        - linux-amd64-fmt
+        - linux-amd64-integration-1-cpu
+        - linux-amd64-integration-2-cpu
+        - linux-amd64-integration-4-cpu
+        - linux-amd64-functional
+        - linux-amd64-unit
+        - all-build
+        - linux-amd64-grpcproxy
+        - linux-386-unit
+        exclude:
+        - go: 1.12.17
+          target: linux-amd64-grpcproxy
+        - go: 1.12.17
+          target: linux-386-unit
+        - go: 1.15.15
+          target: linux-amd64-integration-1-cpu
+        - go: 1.15.15
+          target: linux-amd64-integration-2-cpu
+        - go: 1.12.17
+          target: linux-amd64-unit
+        - go: 1.15.15
+          target: linux-amd64-coverage
+        - go: 1.12.17
+          target: linux-amd64-fmt
+        - go: 1.15.15
+          target: linux-386-unit
+    steps:
+    - uses: actions/checkout@v2
+    - uses: actions/setup-go@v2
+      with:
+        go-version: ${{ matrix.go }}
+    - run: date
+    - env:
+        TARGET: ${{ matrix.target }}
+        GO_VERSION: ${{ matrix.go }}
+      run: |
+        RACE='true'; if [[ ${GO_VERSION} == 1.15.15 ]]; then echo 'setting race off'; RACE='false'; fi
+        echo "${TARGET}"
+        case "${TARGET}" in
+          linux-amd64-fmt)
+            GOARCH=amd64 PASSES='fmt bom dep' ./test
+            ;;
+          linux-amd64-integration-1-cpu)
+            GOARCH=amd64 CPU=1 PASSES='integration' RACE="${RACE}" ./test
+            ;;
+          linux-amd64-integration-2-cpu)
+            GOARCH=amd64 CPU=2 PASSES='integration' RACE="${RACE}" ./test
+            ;;
+          linux-amd64-integration-4-cpu)
+            GOARCH=amd64 CPU=4 PASSES='integration' RACE="${RACE}" ./test
+            ;;
+          linux-amd64-functional)
+            ./build && GOARCH=amd64 PASSES='functional' RACE="${RACE}" ./test
+            ;;
+          linux-amd64-unit)
+            GOARCH=amd64 PASSES='unit' RACE="${RACE}" ./test
+            ;;
+          all-build)
+            GOARCH=amd64 PASSES='build' ./test
+            GOARCH=386 PASSES='build' ./test
+            GO_BUILD_FLAGS='-v' GOOS=darwin GOARCH=amd64 ./build
+            GO_BUILD_FLAGS='-v' GOOS=windows GOARCH=amd64 ./build
+            GO_BUILD_FLAGS='-v' GOARCH=arm ./build
+            GO_BUILD_FLAGS='-v' GOARCH=arm64 ./build
+            GO_BUILD_FLAGS='-v' GOARCH=ppc64le ./build
+            GO_BUILD_FLAGS='-v' GOARCH=s390x ./build
+            ;;
+          linux-amd64-grpcproxy)
+            PASSES='build grpcproxy' CPU='4' ./test 2>&1 | tee test.log
+            ! egrep "(--- FAIL:|DATA RACE|panic: test timed out|appears to have leaked)" -B50 -A10 test.log
+            ;;
+          linux-386-unit)
+            GOARCH=386 PASSES='unit' ./test
+            ;;
+          *)
+            echo "Failed to find target"
+            exit 1
+            ;;
+        esac
.gitignore (vendored) — 1 addition

@@ -31,6 +31,7 @@ vendor/**/*
 !vendor/**/License*
 !vendor/**/LICENCE*
 !vendor/**/LICENSE*
 !vendor/modules.txt
 vendor/**/*_test.go
 
+*.bak
.travis.yml — 94 deletions (file removed)

@@ -1,94 +0,0 @@
-language: go
-go_import_path: go.etcd.io/etcd
-
-sudo: required
-
-services: docker
-
-go:
-- 1.12.12
-
-notifications:
-  on_success: never
-  on_failure: never
-
-env:
-  matrix:
-  - TARGET=linux-amd64-fmt
-  - TARGET=linux-amd64-integration-1-cpu
-  - TARGET=linux-amd64-integration-2-cpu
-  - TARGET=linux-amd64-integration-4-cpu
-  - TARGET=linux-amd64-functional
-  - TARGET=linux-amd64-unit
-  - TARGET=all-build
-  - TARGET=linux-amd64-grpcproxy
-  - TARGET=linux-386-unit
-
-matrix:
-  fast_finish: true
-  allow_failures:
-  - go: 1.12.12
-    env: TARGET=linux-amd64-grpcproxy
-  - go: 1.12.12
-    env: TARGET=linux-386-unit
-
-before_install:
-- if [[ $TRAVIS_GO_VERSION == 1.* ]]; then docker pull gcr.io/etcd-development/etcd-test:go${TRAVIS_GO_VERSION}; fi
-
-install:
-- go get -t -v -d ./...
-
-script:
-- echo "TRAVIS_GO_VERSION=${TRAVIS_GO_VERSION}"
-- >
-  case "${TARGET}" in
-    linux-amd64-fmt)
-      docker run --rm \
-        --volume=`pwd`:/go/src/go.etcd.io/etcd gcr.io/etcd-development/etcd-test:go${TRAVIS_GO_VERSION} \
-        /bin/bash -c "GOARCH=amd64 PASSES='fmt bom dep' ./test"
-      ;;
-    linux-amd64-integration-1-cpu)
-      docker run --rm \
-        --volume=`pwd`:/go/src/go.etcd.io/etcd gcr.io/etcd-development/etcd-test:go${TRAVIS_GO_VERSION} \
-        /bin/bash -c "GOARCH=amd64 CPU=1 PASSES='integration' ./test"
-      ;;
-    linux-amd64-integration-2-cpu)
-      docker run --rm \
-        --volume=`pwd`:/go/src/go.etcd.io/etcd gcr.io/etcd-development/etcd-test:go${TRAVIS_GO_VERSION} \
-        /bin/bash -c "GOARCH=amd64 CPU=2 PASSES='integration' ./test"
-      ;;
-    linux-amd64-integration-4-cpu)
-      docker run --rm \
-        --volume=`pwd`:/go/src/go.etcd.io/etcd gcr.io/etcd-development/etcd-test:go${TRAVIS_GO_VERSION} \
-        /bin/bash -c "GOARCH=amd64 CPU=4 PASSES='integration' ./test"
-      ;;
-    linux-amd64-functional)
-      docker run --rm \
-        --volume=`pwd`:/go/src/go.etcd.io/etcd gcr.io/etcd-development/etcd-test:go${TRAVIS_GO_VERSION} \
-        /bin/bash -c "./build && GOARCH=amd64 PASSES='functional' ./test"
-      ;;
-    linux-amd64-unit)
-      docker run --rm \
-        --volume=`pwd`:/go/src/go.etcd.io/etcd gcr.io/etcd-development/etcd-test:go${TRAVIS_GO_VERSION} \
-        /bin/bash -c "GOARCH=amd64 PASSES='unit' ./test"
-      ;;
-    all-build)
-      docker run --rm \
-        --volume=`pwd`:/go/src/go.etcd.io/etcd gcr.io/etcd-development/etcd-test:go${TRAVIS_GO_VERSION} \
-        /bin/bash -c "GOARCH=amd64 PASSES='build' ./test \
-          && GOARCH=386 PASSES='build' ./test \
-          && GO_BUILD_FLAGS='-v' GOOS=darwin GOARCH=amd64 ./build \
-          && GO_BUILD_FLAGS='-v' GOOS=windows GOARCH=amd64 ./build \
-          && GO_BUILD_FLAGS='-v' GOARCH=arm ./build \
-          && GO_BUILD_FLAGS='-v' GOARCH=arm64 ./build \
-          && GO_BUILD_FLAGS='-v' GOARCH=ppc64le ./build"
-      ;;
-    linux-amd64-grpcproxy)
-      sudo HOST_TMP_DIR=/tmp TEST_OPTS="PASSES='build grpcproxy'" make docker-test
-      ;;
-    linux-386-unit)
-      docker run --rm \
-        --volume=`pwd`:/go/src/go.etcd.io/etcd gcr.io/etcd-development/etcd-test:go${TRAVIS_GO_VERSION} \
-        /bin/bash -c "GOARCH=386 PASSES='unit' ./test"
-      ;;
-  esac
@@ -1,4 +1,5 @@
-FROM k8s.gcr.io/debian-base:v1.0.0
+# TODO: move to k8s.gcr.io/build-image/debian-base:bullseye-v1.y.z when patched
+FROM debian:bullseye-20210927
 
 ADD etcd /usr/local/bin/
 ADD etcdctl /usr/local/bin/

@@ -1,4 +1,5 @@
-FROM k8s.gcr.io/debian-base-arm64:v1.0.0
+# TODO: move to k8s.gcr.io/build-image/debian-base-arm64:bullseye-1.y.z when patched
+FROM arm64v8/debian:bullseye-20210927
 
 ADD etcd /usr/local/bin/
 ADD etcdctl /usr/local/bin/

@@ -1,4 +1,5 @@
-FROM k8s.gcr.io/debian-base-ppc64le:v1.0.0
+# TODO: move to k8s.gcr.io/build-image/debian-base-ppc64le:bullseye-1.y.z when patched
+FROM ppc64le/debian:bullseye-20210927
 
 ADD etcd /usr/local/bin/
 ADD etcdctl /usr/local/bin/
@@ -174,3 +174,5 @@ As of version v3.2 if an etcd server is launched with the option `--client-cert-
 As of version v3.3 if an etcd server is launched with the option `--peer-cert-allowed-cn` or `--peer-cert-allowed-hostname` filtering of inter-peer connections is enabled. Nodes can only join the etcd cluster if their TLS certificate identity match the allowed one.
 See [etcd security page](https://github.com/etcd-io/etcd/blob/master/Documentation/op-guide/security.md) for more details.
 
+## Notes on password strength
+`etcdctl` command line interface and etcd API don't check a strength (length, coexistence of numbers and alphabets, etc) of the password during creating a new user or updating password of an existing user. An administrator needs to care about a requirement of password strength by themselves.
@@ -4,7 +4,7 @@ title: etcd gateway
 
 ## What is etcd gateway
 
-etcd gateway is a simple TCP proxy that forwards network data to the etcd cluster. The gateway is stateless and transparent; it neither inspects client requests nor interferes with cluster responses.
+etcd gateway is a simple TCP proxy that forwards network data to the etcd cluster. The gateway is stateless and transparent; it neither inspects client requests nor interferes with cluster responses. It does not terminate TLS connections, do TLS handshakes on behalf of its clients, or verify if the connection is secured.
 
 The gateway supports multiple etcd server endpoints and works on a simple round-robin policy. It only routes to available endpoints and hides failures from its clients. Other retry policies, such as weighted round-robin, may be supported in the future.
 

@@ -74,7 +74,7 @@ $ etcd gateway start --discovery-srv=example.com
 
 * Comma-separated list of etcd server targets for forwarding client connections.
 * Default: `127.0.0.1:2379`
-* Invalid example: `https://127.0.0.1:2379` (gateway does not terminate TLS)
+* Invalid example: `https://127.0.0.1:2379` (gateway does not terminate TLS). Note that the gateway does not verify the HTTP schema or inspect the requests, it only forwards requests to the given endpoints.
 
 #### --discovery-srv
 

@@ -103,5 +103,5 @@ $ etcd gateway start --discovery-srv=example.com
 
 #### --trusted-ca-file
 
-* Path to the client TLS CA file for the etcd cluster. Used to authenticate endpoints.
+* Path to the client TLS CA file for the etcd cluster to verify the endpoints returned from SRV discovery. Note that it is ONLY used for authenticating the discovered endpoints rather than creating connections for data transferring. The gateway never terminates TLS connections or create TLS connections on behalf of its clients.
 * Default: (not set)
@@ -2,7 +2,7 @@
 title: Transport security model
 ---
 
-etcd supports automatic TLS as well as authentication through client certificates for both clients to server as well as peer (server to server / cluster) communication.
+etcd supports automatic TLS as well as authentication through client certificates for both clients to server as well as peer (server to server / cluster) communication. **Note that etcd doesn't enable [RBAC based authentication][auth] or the authentication feature in the transport layer by default to reduce friction for users getting started with the database. Further, changing this default would be a breaking change for the project which was established since 2013. An etcd cluster which doesn't enable security features can expose its data to any clients.**
 
 To get up and running, first have a CA certificate and a signed key pair for one member. It is recommended to create and sign a new key pair for every member in a cluster.
 

@@ -426,8 +426,17 @@ Make sure to sign the certificates with a Subject Name the member's public IP ad
 
 The certificate needs to be signed for the member's FQDN in its Subject Name, use Subject Alternative Names (short IP SANs) to add the IP address. The `etcd-ca` tool provides `--domain=` option for its `new-cert` command, and openssl can make [it][alt-name] too.
 
+### Does etcd encrypt data stored on disk drives?
+No. etcd doesn't encrypt key/value data stored on disk drives. If a user need to encrypt data stored on etcd, there are some options:
+* Let client applications encrypt and decrypt the data
+* Use a feature of underlying storage systems for encrypting stored data like [dm-crypt]
+
+### I’m seeing a log warning that "directory X exist without recommended permission -rwx------"
+When etcd create certain new directories it sets file permission to 700 to prevent unprivileged access as possible. However, if user has already created a directory with own preference, etcd uses the existing directory and logs a warning message if the permission is different than 700.
+
 [cfssl]: https://github.com/cloudflare/cfssl
 [tls-setup]: ../../hack/tls-setup
 [tls-guide]: https://github.com/coreos/docs/blob/master/os/generate-self-signed-certificates.md
 [alt-name]: http://wiki.cacert.org/FAQ/subjectAltName
 [auth]: authentication.md
+[dm-crypt]: https://en.wikipedia.org/wiki/Dm-crypt
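The transport-security note above concerns server-side configuration; for completeness, a minimal client-side sketch (not part of this change; file paths and the endpoint are placeholders) of connecting a Go `clientv3` client with a client certificate and a trusted CA:

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"io/ioutil"
	"log"
	"time"

	"go.etcd.io/etcd/clientv3"
)

func main() {
	// Load the client key pair and the CA that signed the server certificates.
	cert, err := tls.LoadX509KeyPair("client.crt", "client.key")
	if err != nil {
		log.Fatal(err)
	}
	caBytes, err := ioutil.ReadFile("ca.crt")
	if err != nil {
		log.Fatal(err)
	}
	pool := x509.NewCertPool()
	pool.AppendCertsFromPEM(caBytes)

	// Dial the cluster over HTTPS with mutual TLS.
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"https://127.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
		TLS: &tls.Config{
			Certificates: []tls.Certificate{cert},
			RootCAs:      pool,
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()
}
```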
Makefile — 6 changes

@@ -51,7 +51,7 @@ docker-remove:
 
 
 
-GO_VERSION ?= 1.12.12
+GO_VERSION ?= 1.12.17
 ETCD_VERSION ?= $(shell git rev-parse --short HEAD || echo "GitNotFound")
 
 TEST_SUFFIX = $(shell date +%s | base64 | head -c 15)

@@ -65,11 +65,11 @@ endif
 
 
 # Example:
-#   GO_VERSION=1.12.12 make build-docker-test
+#   GO_VERSION=1.12.17 make build-docker-test
 #   make build-docker-test
 #
 #   gcloud docker -- login -u _json_key -p "$(cat /etc/gcp-key-etcd-development.json)" https://gcr.io
-#   GO_VERSION=1.12.12 make push-docker-test
+#   GO_VERSION=1.12.17 make push-docker-test
 #   make push-docker-test
 #
 #   gsutil -m acl ch -u allUsers:R -r gs://artifacts.etcd-development.appspot.com
@@ -21,7 +21,7 @@ import (
 	"errors"
 	"time"
 
-	jwt "github.com/dgrijalva/jwt-go"
+	"github.com/golang-jwt/jwt"
 	"go.uber.org/zap"
 )
 
@@ -21,7 +21,7 @@ import (
 	"io/ioutil"
 	"time"
 
-	jwt "github.com/dgrijalva/jwt-go"
+	"github.com/golang-jwt/jwt"
 )
 
 const (
@@ -37,7 +37,7 @@ const (
 
 // var for testing purposes
 var (
-	simpleTokenTTL           = 5 * time.Minute
+	simpleTokenTTLDefault    = 300 * time.Second
 	simpleTokenTTLResolution = 1 * time.Second
 )
 

@@ -47,6 +47,7 @@ type simpleTokenTTLKeeper struct {
 	stopc           chan struct{}
 	deleteTokenFunc func(string)
 	mu              *sync.Mutex
+	simpleTokenTTL  time.Duration
 }
 
 func (tm *simpleTokenTTLKeeper) stop() {

@@ -58,12 +59,12 @@ func (tm *simpleTokenTTLKeeper) stop() {
 }
 
 func (tm *simpleTokenTTLKeeper) addSimpleToken(token string) {
-	tm.tokens[token] = time.Now().Add(simpleTokenTTL)
+	tm.tokens[token] = time.Now().Add(tm.simpleTokenTTL)
 }
 
 func (tm *simpleTokenTTLKeeper) resetSimpleToken(token string) {
 	if _, ok := tm.tokens[token]; ok {
-		tm.tokens[token] = time.Now().Add(simpleTokenTTL)
+		tm.tokens[token] = time.Now().Add(tm.simpleTokenTTL)
 	}
 }
 

@@ -101,6 +102,7 @@ type tokenSimple struct {
 	simpleTokenKeeper *simpleTokenTTLKeeper
 	simpleTokensMu    sync.Mutex
 	simpleTokens      map[string]string // token -> username
+	simpleTokenTTL    time.Duration
 }
 
 func (t *tokenSimple) genTokenPrefix() (string, error) {

@@ -157,6 +159,10 @@ func (t *tokenSimple) invalidateUser(username string) {
 }
 
 func (t *tokenSimple) enable() {
+	if t.simpleTokenTTL <= 0 {
+		t.simpleTokenTTL = simpleTokenTTLDefault
+	}
+
 	delf := func(tk string) {
 		if username, ok := t.simpleTokens[tk]; ok {
 			if t.lg != nil {

@@ -177,6 +183,7 @@ func (t *tokenSimple) enable() {
 		stopc:           make(chan struct{}),
 		deleteTokenFunc: delf,
 		mu:              &t.simpleTokensMu,
+		simpleTokenTTL:  t.simpleTokenTTL,
 	}
 	go t.simpleTokenKeeper.run()
 }

@@ -234,10 +241,14 @@ func (t *tokenSimple) isValidSimpleToken(ctx context.Context, token string) bool
 	return false
 }
 
-func newTokenProviderSimple(lg *zap.Logger, indexWaiter func(uint64) <-chan struct{}) *tokenSimple {
+func newTokenProviderSimple(lg *zap.Logger, indexWaiter func(uint64) <-chan struct{}, TokenTTL time.Duration) *tokenSimple {
 	if lg == nil {
 		lg = zap.NewNop()
 	}
 	return &tokenSimple{
 		lg:             lg,
 		simpleTokens:   make(map[string]string),
 		indexWaiter:    indexWaiter,
+		simpleTokenTTL: TokenTTL,
 	}
 }
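The hunks above move the simple-token TTL from a package-level value onto the keeper and provider. A standalone sketch of the same pattern (illustrative only, not etcd code): a keeper whose expiry deadline comes from a per-instance TTL with a fallback default.

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// ttlKeeper tracks token deadlines; the TTL lives on the instance rather than
// in a package-level variable, mirroring the change above.
type ttlKeeper struct {
	mu     sync.Mutex
	tokens map[string]time.Time
	ttl    time.Duration
}

func newTTLKeeper(ttl time.Duration) *ttlKeeper {
	if ttl <= 0 {
		ttl = 300 * time.Second // fall back to a default, like simpleTokenTTLDefault
	}
	return &ttlKeeper{tokens: make(map[string]time.Time), ttl: ttl}
}

// add registers a token with a deadline of now+ttl.
func (k *ttlKeeper) add(tok string) {
	k.mu.Lock()
	defer k.mu.Unlock()
	k.tokens[tok] = time.Now().Add(k.ttl)
}

// expired reports whether a token is unknown or past its deadline.
func (k *ttlKeeper) expired(tok string) bool {
	k.mu.Lock()
	defer k.mu.Unlock()
	deadline, ok := k.tokens[tok]
	return !ok || time.Now().After(deadline)
}

func main() {
	k := newTTLKeeper(time.Second)
	k.add("t1")
	fmt.Println(k.expired("t1")) // false right after adding
	time.Sleep(1100 * time.Millisecond)
	fmt.Println(k.expired("t1")) // true after the TTL has elapsed
}
```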
@@ -24,9 +24,9 @@ import (
 // TestSimpleTokenDisabled ensures that TokenProviderSimple behaves correctly when
 // disabled.
 func TestSimpleTokenDisabled(t *testing.T) {
-	initialState := newTokenProviderSimple(zap.NewExample(), dummyIndexWaiter)
+	initialState := newTokenProviderSimple(zap.NewExample(), dummyIndexWaiter, simpleTokenTTLDefault)
 
-	explicitlyDisabled := newTokenProviderSimple(zap.NewExample(), dummyIndexWaiter)
+	explicitlyDisabled := newTokenProviderSimple(zap.NewExample(), dummyIndexWaiter, simpleTokenTTLDefault)
 	explicitlyDisabled.enable()
 	explicitlyDisabled.disable()
 

@@ -48,7 +48,7 @@ func TestSimpleTokenDisabled(t *testing.T) {
 // TestSimpleTokenAssign ensures that TokenProviderSimple can correctly assign a
 // token, look it up with info, and invalidate it by user.
 func TestSimpleTokenAssign(t *testing.T) {
-	tp := newTokenProviderSimple(zap.NewExample(), dummyIndexWaiter)
+	tp := newTokenProviderSimple(zap.NewExample(), dummyIndexWaiter, simpleTokenTTLDefault)
 	tp.enable()
 	ctx := context.WithValue(context.WithValue(context.TODO(), AuthenticateParamIndex{}, uint64(1)), AuthenticateParamSimpleTokenPrefix{}, "dummy")
 	token, err := tp.assign(ctx, "user1", 0)
@@ -23,6 +23,7 @@ import (
 	"strings"
 	"sync"
 	"sync/atomic"
+	"time"
 
 	"go.etcd.io/etcd/auth/authpb"
 	"go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"

@@ -59,6 +60,7 @@
 	ErrRoleNotFound         = errors.New("auth: role not found")
 	ErrRoleEmpty            = errors.New("auth: role name is empty")
 	ErrAuthFailed           = errors.New("auth: authentication failed, invalid user ID or password")
+	ErrNoPasswordUser       = errors.New("auth: authentication failed, password was given for no password user")
 	ErrPermissionDenied     = errors.New("auth: permission denied")
 	ErrRoleNotGranted       = errors.New("auth: role is not granted to the user")
 	ErrPermissionNotGranted = errors.New("auth: permission is not granted to the role")

@@ -360,7 +362,7 @@ func (as *authStore) CheckPassword(username, password string) (uint64, error) {
 	}
 
 	if user.Options != nil && user.Options.NoPassword {
-		return 0, ErrAuthFailed
+		return 0, ErrNoPasswordUser
 	}
 
 	return getRevision(tx), nil

@@ -994,7 +996,7 @@ func (as *authStore) IsAdminPermitted(authInfo *AuthInfo) error {
 	if !as.IsAuthEnabled() {
 		return nil
 	}
-	if authInfo == nil {
+	if authInfo == nil || authInfo.Username == "" {
 		return ErrUserEmpty
 	}
 

@@ -1351,7 +1353,8 @@ func decomposeOpts(lg *zap.Logger, optstr string) (string, map[string]string, er
 func NewTokenProvider(
 	lg *zap.Logger,
 	tokenOpts string,
-	indexWaiter func(uint64) <-chan struct{}) (TokenProvider, error) {
+	indexWaiter func(uint64) <-chan struct{},
+	TokenTTL time.Duration) (TokenProvider, error) {
 	tokenType, typeSpecificOpts, err := decomposeOpts(lg, tokenOpts)
 	if err != nil {
 		return nil, ErrInvalidAuthOpts

@@ -1364,7 +1367,7 @@ func NewTokenProvider(
 		} else {
 			plog.Warningf("simple token is not cryptographically signed")
 		}
-		return newTokenProviderSimple(lg, indexWaiter), nil
+		return newTokenProviderSimple(lg, indexWaiter, TokenTTL), nil
 
 	case tokenTypeJWT:
 		return newTokenProviderJWT(lg, typeSpecificOpts)
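With the signature change above, callers of `NewTokenProvider` pass the configured TTL as a fourth argument. A hedged sketch of such a call under that assumption (the index waiter shown here is a stand-in; inside etcd the server supplies its own and wires the provider into the auth store):

```go
package main

import (
	"log"
	"time"

	"go.etcd.io/etcd/auth"
	"go.uber.org/zap"
)

func main() {
	// The index waiter only matters for the "simple" provider; an
	// already-closed channel is enough for illustration.
	indexWaiter := func(uint64) <-chan struct{} {
		ch := make(chan struct{})
		close(ch)
		return ch
	}

	// "simple" selects the simple-token provider; the TTL is what
	// --auth-token-ttl now controls (300 seconds by default).
	tp, err := auth.NewTokenProvider(zap.NewExample(), "simple", indexWaiter, 300*time.Second)
	if err != nil {
		log.Fatal(err)
	}
	_ = tp
}
```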
@@ -48,7 +48,7 @@ func TestNewAuthStoreRevision(t *testing.T) {
 	b, tPath := backend.NewDefaultTmpBackend()
 	defer os.Remove(tPath)
 
-	tp, err := NewTokenProvider(zap.NewExample(), tokenTypeSimple, dummyIndexWaiter)
+	tp, err := NewTokenProvider(zap.NewExample(), tokenTypeSimple, dummyIndexWaiter, simpleTokenTTLDefault)
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -78,7 +78,7 @@ func TestNewAuthStoreBcryptCost(t *testing.T) {
 	b, tPath := backend.NewDefaultTmpBackend()
 	defer os.Remove(tPath)
 
-	tp, err := NewTokenProvider(zap.NewExample(), tokenTypeSimple, dummyIndexWaiter)
+	tp, err := NewTokenProvider(zap.NewExample(), tokenTypeSimple, dummyIndexWaiter, simpleTokenTTLDefault)
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -98,7 +98,7 @@ func TestNewAuthStoreBcryptCost(t *testing.T) {
 func setupAuthStore(t *testing.T) (store *authStore, teardownfunc func(t *testing.T)) {
 	b, tPath := backend.NewDefaultTmpBackend()
 
-	tp, err := NewTokenProvider(zap.NewExample(), tokenTypeSimple, dummyIndexWaiter)
+	tp, err := NewTokenProvider(zap.NewExample(), tokenTypeSimple, dummyIndexWaiter, simpleTokenTTLDefault)
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -626,7 +626,7 @@ func TestAuthInfoFromCtxRace(t *testing.T) {
 	b, tPath := backend.NewDefaultTmpBackend()
 	defer os.Remove(tPath)
 
-	tp, err := NewTokenProvider(zap.NewExample(), tokenTypeSimple, dummyIndexWaiter)
+	tp, err := NewTokenProvider(zap.NewExample(), tokenTypeSimple, dummyIndexWaiter, simpleTokenTTLDefault)
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -658,6 +658,12 @@ func TestIsAdminPermitted(t *testing.T) {
 		t.Errorf("expected %v, got %v", ErrUserNotFound, err)
 	}
 
+	// empty user
+	err = as.IsAdminPermitted(&AuthInfo{Username: "", Revision: 1})
+	if err != ErrUserEmpty {
+		t.Errorf("expected %v, got %v", ErrUserEmpty, err)
+	}
+
 	// non-admin user
 	err = as.IsAdminPermitted(&AuthInfo{Username: "foo", Revision: 1})
 	if err != ErrPermissionDenied {

@@ -692,7 +698,7 @@ func TestRecoverFromSnapshot(t *testing.T) {
 
 	as.Close()
 
-	tp, err := NewTokenProvider(zap.NewExample(), tokenTypeSimple, dummyIndexWaiter)
+	tp, err := NewTokenProvider(zap.NewExample(), tokenTypeSimple, dummyIndexWaiter, simpleTokenTTLDefault)
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -725,13 +731,13 @@ func contains(array []string, str string) bool {
 
 func TestHammerSimpleAuthenticate(t *testing.T) {
 	// set TTL values low to try to trigger races
-	oldTTL, oldTTLRes := simpleTokenTTL, simpleTokenTTLResolution
+	oldTTL, oldTTLRes := simpleTokenTTLDefault, simpleTokenTTLResolution
 	defer func() {
-		simpleTokenTTL = oldTTL
+		simpleTokenTTLDefault = oldTTL
 		simpleTokenTTLResolution = oldTTLRes
 	}()
-	simpleTokenTTL = 10 * time.Millisecond
-	simpleTokenTTLResolution = simpleTokenTTL
+	simpleTokenTTLDefault = 10 * time.Millisecond
+	simpleTokenTTLResolution = simpleTokenTTLDefault
 	users := make(map[string]struct{})
 
 	as, tearDown := setupAuthStore(t)

@@ -774,7 +780,7 @@ func TestRolesOrder(t *testing.T) {
 	b, tPath := backend.NewDefaultTmpBackend()
 	defer os.Remove(tPath)
 
-	tp, err := NewTokenProvider(zap.NewExample(), tokenTypeSimple, dummyIndexWaiter)
+	tp, err := NewTokenProvider(zap.NewExample(), tokenTypeSimple, dummyIndexWaiter, simpleTokenTTLDefault)
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -829,7 +835,7 @@ func testAuthInfoFromCtxWithRoot(t *testing.T, opts string) {
 	b, tPath := backend.NewDefaultTmpBackend()
 	defer os.Remove(tPath)
 
-	tp, err := NewTokenProvider(zap.NewExample(), opts, dummyIndexWaiter)
+	tp, err := NewTokenProvider(zap.NewExample(), opts, dummyIndexWaiter, simpleTokenTTLDefault)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -44,15 +44,6 @@
       }
     ]
   },
-  {
-    "project": "github.com/dgrijalva/jwt-go",
-    "licenses": [
-      {
-        "type": "MIT License",
-        "confidence": 0.9891304347826086
-      }
-    ]
-  },
   {
     "project": "github.com/dustin/go-humanize",
     "licenses": [

@@ -71,6 +62,15 @@
       }
     ]
   },
+  {
+    "project": "github.com/golang-jwt/jwt",
+    "licenses": [
+      {
+        "type": "MIT License",
+        "confidence": 0.9891304347826086
+      }
+    ]
+  },
   {
     "project": "github.com/golang/groupcache/lru",
     "licenses": [

@@ -378,7 +378,7 @@
     ]
   },
   {
-    "project": "golang.org/x/sys/unix",
+    "project": "golang.org/x/sys",
     "licenses": [
       {
         "type": "BSD 3-clause \"New\" or \"Revised\" License",
@@ -65,8 +65,8 @@ func TestUserErrorAuth(t *testing.T) {
 	authSetupRoot(t, authapi.Auth)
 
 	// unauthenticated client
-	if _, err := authapi.UserAdd(context.TODO(), "foo", "bar"); err != rpctypes.ErrUserNotFound {
-		t.Fatalf("expected %v, got %v", rpctypes.ErrUserNotFound, err)
+	if _, err := authapi.UserAdd(context.TODO(), "foo", "bar"); err != rpctypes.ErrUserEmpty {
+		t.Fatalf("expected %v, got %v", rpctypes.ErrUserEmpty, err)
 	}
 
 	// wrong id or password
@@ -582,6 +582,30 @@ func testWatchWithProgressNotify(t *testing.T, watchOnPut bool) {
 	}
 }
 
+func TestConfigurableWatchProgressNotifyInterval(t *testing.T) {
+	progressInterval := 200 * time.Millisecond
+	clus := integration.NewClusterV3(t,
+		&integration.ClusterConfig{
+			Size:                        3,
+			WatchProgressNotifyInterval: progressInterval,
+		})
+	defer clus.Terminate(t)
+
+	opts := []clientv3.OpOption{clientv3.WithProgressNotify()}
+	rch := clus.RandClient().Watch(context.Background(), "foo", opts...)
+
+	timeout := 1 * time.Second // we expect to receive watch progress notify in 2 * progressInterval,
+	// but for CPU-starved situation it may take longer. So we use 1 second here for timeout.
+	select {
+	case resp := <-rch: // waiting for a watch progress notify response
+		if !resp.IsProgressNotify() {
+			t.Fatalf("expected resp.IsProgressNotify() == true")
+		}
+	case <-time.After(timeout):
+		t.Fatalf("timed out waiting for watch progress notify response in %v", timeout)
+	}
+}
+
 func TestWatchRequestProgress(t *testing.T) {
 	testCases := []struct {
 		name string
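The new test exercises the configurable progress-notify interval end to end. A client-side sketch (endpoint is a placeholder) of watching with progress notifications and telling them apart from ordinary events:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"go.etcd.io/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// With the server started with a low --experimental-watch-progress-notify-interval,
	// progress notifications arrive even when "foo" is never written.
	rch := cli.Watch(context.Background(), "foo", clientv3.WithProgressNotify())
	for resp := range rch {
		if resp.IsProgressNotify() {
			fmt.Println("progress notify at revision", resp.Header.Revision)
			continue
		}
		for _, ev := range resp.Events {
			fmt.Printf("%s %q -> %q\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
		}
	}
}
```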
@@ -16,8 +16,7 @@ package ordering
 
 import (
 	"errors"
-	"sync"
-	"time"
+	"sync/atomic"
 
 	"go.etcd.io/etcd/clientv3"
 )

@@ -26,26 +25,18 @@ type OrderViolationFunc func(op clientv3.Op, resp clientv3.OpResponse, prevRev i
 
 var ErrNoGreaterRev = errors.New("etcdclient: no cluster members have a revision higher than the previously received revision")
 
-func NewOrderViolationSwitchEndpointClosure(c clientv3.Client) OrderViolationFunc {
-	var mu sync.Mutex
-	violationCount := 0
-	return func(op clientv3.Op, resp clientv3.OpResponse, prevRev int64) error {
-		if violationCount > len(c.Endpoints()) {
+func NewOrderViolationSwitchEndpointClosure(c *clientv3.Client) OrderViolationFunc {
+	violationCount := int32(0)
+	return func(_ clientv3.Op, _ clientv3.OpResponse, _ int64) error {
+		// Each request is assigned by round-robin load-balancer's picker to a different
+		// endpoints. If we cycled them 5 times (even with some level of concurrency),
+		// with high probability no endpoint points on a member with fresh data.
+		// TODO: Ideally we should track members (resp.opp.Header) that returned
+		// stale result and explicitly temporarily disable them in 'picker'.
+		if atomic.LoadInt32(&violationCount) > int32(5*len(c.Endpoints())) {
 			return ErrNoGreaterRev
 		}
-		mu.Lock()
-		defer mu.Unlock()
-		eps := c.Endpoints()
-		// force client to connect to given endpoint by limiting to a single endpoint
-		c.SetEndpoints(eps[violationCount%len(eps)])
-		// give enough time for operation
-		time.Sleep(1 * time.Second)
-		// set available endpoints back to all endpoints in to ensure
-		// the client has access to all the endpoints.
-		c.SetEndpoints(eps...)
-		// give enough time for operation
-		time.Sleep(1 * time.Second)
-		violationCount++
+		atomic.AddInt32(&violationCount, 1)
 		return nil
 	}
 }
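After this change the closure takes a `*clientv3.Client` and simply counts violations atomically. A sketch of how a caller wires it up, mirroring the in-tree usage in the gRPC proxy further below (endpoint is a placeholder):

```go
package main

import (
	"context"
	"log"

	"go.etcd.io/etcd/clientv3"
	"go.etcd.io/etcd/clientv3/ordering"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Wrap the KV so reads that go backwards in revision invoke the violation
	// callback instead of being returned silently to the caller.
	vf := ordering.NewOrderViolationSwitchEndpointClosure(cli)
	cli.KV = ordering.NewKV(cli.KV, vf)

	if _, err := cli.Get(context.Background(), "foo"); err != nil {
		log.Println("get:", err) // may be ordering.ErrNoGreaterRev under partitions
	}
}
```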
@@ -64,19 +64,19 @@ func TestEndpointSwitchResolvesViolation(t *testing.T) {
 	// NewOrderViolationSwitchEndpointClosure will be able to
 	// access the full list of endpoints.
 	cli.SetEndpoints(eps...)
-	OrderingKv := NewKV(cli.KV, NewOrderViolationSwitchEndpointClosure(*cli))
+	orderingKv := NewKV(cli.KV, NewOrderViolationSwitchEndpointClosure(cli))
 	// set prevRev to the second member's revision of "foo" such that
 	// the revision is higher than the third member's revision of "foo"
-	_, err = OrderingKv.Get(ctx, "foo")
+	_, err = orderingKv.Get(ctx, "foo")
 	if err != nil {
 		t.Fatal(err)
 	}
 
 	t.Logf("Reconfigure client to speak only to the 'partitioned' member")
 	cli.SetEndpoints(clus.Members[2].GRPCAddr())
 	time.Sleep(1 * time.Second) // give enough time for operation
-	_, err = OrderingKv.Get(ctx, "foo", clientv3.WithSerializable())
-	if err != nil {
-		t.Fatalf("failed to resolve order violation %v", err)
+	_, err = orderingKv.Get(ctx, "foo", clientv3.WithSerializable())
+	if err != ErrNoGreaterRev {
+		t.Fatal("While speaking to partitioned leader, we should get ErrNoGreaterRev error")
 	}
 }

@@ -123,7 +123,7 @@ func TestUnresolvableOrderViolation(t *testing.T) {
 	// access the full list of endpoints.
 	cli.SetEndpoints(eps...)
 	time.Sleep(1 * time.Second) // give enough time for operation
-	OrderingKv := NewKV(cli.KV, NewOrderViolationSwitchEndpointClosure(*cli))
+	OrderingKv := NewKV(cli.KV, NewOrderViolationSwitchEndpointClosure(cli))
 	// set prevRev to the first member's revision of "foo" such that
 	// the revision is higher than the fourth and fifth members' revision of "foo"
 	_, err = OrderingKv.Get(ctx, "foo")
@@ -105,6 +105,16 @@ func (c *Client) streamClientInterceptor(logger *zap.Logger, optFuncs ...retryOp
 	intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs)
 	return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
 		ctx = withVersion(ctx)
+		// getToken automatically
+		// TODO(cfc4n): keep this code block, remove codes about getToken in client.go after pr #12165 merged.
+		if c.authTokenBundle != nil {
+			// equal to c.Username != "" && c.Password != ""
+			err := c.getToken(ctx)
+			if err != nil && rpctypes.Error(err) != rpctypes.ErrAuthNotEnabled {
+				logger.Error("clientv3/retry_interceptor: getToken failed", zap.Error(err))
+				return nil, err
+			}
+		}
 		grpcOpts, retryOpts := filterCallOptions(opts)
 		callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts)
 		// short circuit for simplicity, and avoiding allocations.
@@ -25,6 +25,7 @@ import (
 	pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
 	mvccpb "go.etcd.io/etcd/mvcc/mvccpb"
 
+	"go.uber.org/zap"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/metadata"

@@ -140,6 +141,7 @@ type watcher struct {
 
 	// streams holds all the active grpc streams keyed by ctx value.
 	streams map[string]*watchGrpcStream
+	lg      *zap.Logger
 }
 
 // watchGrpcStream tracks all watch resources attached to a single grpc stream.

@@ -176,6 +178,8 @@ type watchGrpcStream struct {
 	resumec chan struct{}
 	// closeErr is the error that closed the watch stream
 	closeErr error
+
+	lg *zap.Logger
 }
 
 // watchStreamRequest is a union of the supported watch request operation types

@@ -242,6 +246,7 @@ func NewWatchFromWatchClient(wc pb.WatchClient, c *Client) Watcher {
 	}
 	if c != nil {
 		w.callOpts = c.callOpts
+		w.lg = c.lg
 	}
 	return w
 }

@@ -273,6 +278,7 @@ func (w *watcher) newWatcherGrpcStream(inctx context.Context) *watchGrpcStream {
 		errc:     make(chan error, 1),
 		closingc: make(chan *watcherStream),
 		resumec:  make(chan struct{}),
+		lg:       w.lg,
 	}
 	go wgs.run()
 	return wgs

@@ -544,10 +550,18 @@ func (w *watchGrpcStream) run() {
 				w.resuming = append(w.resuming, ws)
 				if len(w.resuming) == 1 {
 					// head of resume queue, can register a new watcher
-					wc.Send(ws.initReq.toPB())
+					if err := wc.Send(ws.initReq.toPB()); err != nil {
+						if w.lg != nil {
+							w.lg.Debug("error when sending request", zap.Error(err))
+						}
+					}
 				}
 			case *progressRequest:
-				wc.Send(wreq.toPB())
+				if err := wc.Send(wreq.toPB()); err != nil {
+					if w.lg != nil {
+						w.lg.Debug("error when sending request", zap.Error(err))
+					}
+				}
 			}
 
 		// new events from the watch client

@@ -571,7 +585,11 @@ func (w *watchGrpcStream) run() {
 				}
 
 				if ws := w.nextResume(); ws != nil {
-					wc.Send(ws.initReq.toPB())
+					if err := wc.Send(ws.initReq.toPB()); err != nil {
+						if w.lg != nil {
+							w.lg.Debug("error when sending request", zap.Error(err))
+						}
+					}
 				}
 
 				// reset for next iteration

@@ -616,7 +634,14 @@ func (w *watchGrpcStream) run() {
 					},
 				}
 				req := &pb.WatchRequest{RequestUnion: cr}
-				wc.Send(req)
+				if w.lg != nil {
+					w.lg.Debug("sending watch cancel request for failed dispatch", zap.Int64("watch-id", pbresp.WatchId))
+				}
+				if err := wc.Send(req); err != nil {
+					if w.lg != nil {
+						w.lg.Debug("failed to send watch cancel request", zap.Int64("watch-id", pbresp.WatchId), zap.Error(err))
+					}
+				}
 			}
 
 		// watch client failed on Recv; spawn another if possible

@@ -629,7 +654,11 @@ func (w *watchGrpcStream) run() {
 				return
 			}
 			if ws := w.nextResume(); ws != nil {
-				wc.Send(ws.initReq.toPB())
+				if err := wc.Send(ws.initReq.toPB()); err != nil {
+					if w.lg != nil {
+						w.lg.Debug("error when sending request", zap.Error(err))
+					}
+				}
 			}
 			cancelSet = make(map[int64]struct{})
 

@@ -637,6 +666,25 @@ func (w *watchGrpcStream) run() {
 			return
 
 		case ws := <-w.closingc:
+			if ws.id != -1 {
+				// client is closing an established watch; close it on the server proactively instead of waiting
+				// to close when the next message arrives
+				cancelSet[ws.id] = struct{}{}
+				cr := &pb.WatchRequest_CancelRequest{
+					CancelRequest: &pb.WatchCancelRequest{
+						WatchId: ws.id,
+					},
+				}
+				req := &pb.WatchRequest{RequestUnion: cr}
+				if w.lg != nil {
+					w.lg.Debug("sending watch cancel request for closed watcher", zap.Int64("watch-id", ws.id))
+				}
+				if err := wc.Send(req); err != nil {
+					if w.lg != nil {
+						w.lg.Debug("failed to send watch cancel request", zap.Int64("watch-id", ws.id), zap.Error(err))
+					}
+				}
+			}
 			w.closeSubstream(ws)
 			delete(closing, ws)
 			// no more watchers on this stream, shutdown
@@ -53,6 +53,7 @@ const (
 	DefaultMaxSnapshots = 5
 	DefaultMaxWALs      = 5
 	DefaultMaxTxnOps    = uint(128)
+	DefaultWarningApplyDuration  = 100 * time.Millisecond
 	DefaultMaxRequestBytes       = 1.5 * 1024 * 1024
 	DefaultGRPCKeepAliveMinTime  = 5 * time.Second
 	DefaultGRPCKeepAliveInterval = 2 * time.Hour

@@ -273,14 +274,21 @@
 	AuthToken  string `json:"auth-token"`
 	BcryptCost uint   `json:"bcrypt-cost"`
 
+	//The AuthTokenTTL in seconds of the simple token
+	AuthTokenTTL uint `json:"auth-token-ttl"`
+
 	ExperimentalInitialCorruptCheck bool          `json:"experimental-initial-corrupt-check"`
 	ExperimentalCorruptCheckTime    time.Duration `json:"experimental-corrupt-check-time"`
 	ExperimentalEnableV2V3          string        `json:"experimental-enable-v2v3"`
 	// ExperimentalBackendFreelistType specifies the type of freelist that boltdb backend uses (array and map are supported types).
 	ExperimentalBackendFreelistType string `json:"experimental-backend-bbolt-freelist-type"`
 	// ExperimentalEnableLeaseCheckpoint enables primary lessor to persist lease remainingTTL to prevent indefinite auto-renewal of long lived leases.
 	ExperimentalEnableLeaseCheckpoint bool `json:"experimental-enable-lease-checkpoint"`
 	ExperimentalCompactionBatchLimit  int  `json:"experimental-compaction-batch-limit"`
+	ExperimentalWatchProgressNotifyInterval time.Duration `json:"experimental-watch-progress-notify-interval"`
+	// ExperimentalWarningApplyDuration is the time duration after which a warning is generated if applying request
+	// takes more time than this value.
+	ExperimentalWarningApplyDuration time.Duration `json:"experimental-warning-apply-duration"`
 
 	// ForceNewCluster starts a new cluster even if previously started; unsafe.
 	ForceNewCluster bool `json:"force-new-cluster"`

@@ -335,6 +343,10 @@
 	// Only valid if "logger" option is "capnslog".
 	// WARN: DO NOT USE THIS!
 	LogPkgLevels string `json:"log-package-levels"`
+
+	// UnsafeNoFsync disables all uses of fsync.
+	// Setting this is unsafe and will cause data loss.
+	UnsafeNoFsync bool `json:"unsafe-no-fsync"`
 }
 
 // configYAML holds the config suitable for yaml parsing

@@ -380,8 +392,9 @@ func NewConfig() *Config {
 		SnapshotCount:          etcdserver.DefaultSnapshotCount,
 		SnapshotCatchUpEntries: etcdserver.DefaultSnapshotCatchUpEntries,
 
 		MaxTxnOps:       DefaultMaxTxnOps,
 		MaxRequestBytes: DefaultMaxRequestBytes,
+		ExperimentalWarningApplyDuration: DefaultWarningApplyDuration,
 
 		GRPCKeepAliveMinTime:  DefaultGRPCKeepAliveMinTime,
 		GRPCKeepAliveInterval: DefaultGRPCKeepAliveInterval,

@@ -406,8 +419,9 @@ func NewConfig() *Config {
 		CORS:          map[string]struct{}{"*": {}},
 		HostWhitelist: map[string]struct{}{"*": {}},
 
 		AuthToken:    "simple",
 		BcryptCost:   uint(bcrypt.DefaultCost),
+		AuthTokenTTL: 300,
 
 		PreVote: false, // TODO: enable by default in v3.5
 
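For embedded users, the new `embed.Config` fields are set directly on the config before starting the server. A minimal sketch under that assumption (the data directory and values are placeholders):

```go
package main

import (
	"log"
	"time"

	"go.etcd.io/etcd/embed"
)

func main() {
	cfg := embed.NewConfig()
	cfg.Dir = "default.etcd"

	// New knobs introduced by the hunks above.
	cfg.AuthTokenTTL = 600                                        // seconds; default is 300
	cfg.ExperimentalWatchProgressNotifyInterval = 5 * time.Minute // periodic watch progress notifies
	cfg.ExperimentalWarningApplyDuration = 100 * time.Millisecond // warn on slow applies

	e, err := embed.StartEtcd(cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer e.Close()

	<-e.Server.ReadyNotify()
	log.Println("embedded etcd is ready")
}
```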
@@ -178,9 +178,11 @@ func TestAutoCompactionModeParse(t *testing.T) {
 		{"revision", "1", false, 1},
 		{"revision", "1h", false, time.Hour},
 		{"revision", "a", true, 0},
+		{"revision", "-1", true, 0},
 		// periodic
 		{"periodic", "1", false, time.Hour},
 		{"periodic", "a", true, 0},
+		{"revision", "-1", true, 0},
 		// err mode
 		{"errmode", "1", false, 0},
 		{"errmode", "1h", false, time.Hour},
@@ -162,50 +162,54 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) {
 	backendFreelistType := parseBackendFreelistType(cfg.ExperimentalBackendFreelistType)
 
 	srvcfg := etcdserver.ServerConfig{
 		Name:                       cfg.Name,
 		ClientURLs:                 cfg.ACUrls,
 		PeerURLs:                   cfg.APUrls,
 		DataDir:                    cfg.Dir,
 		DedicatedWALDir:            cfg.WalDir,
 		SnapshotCount:              cfg.SnapshotCount,
 		SnapshotCatchUpEntries:     cfg.SnapshotCatchUpEntries,
 		MaxSnapFiles:               cfg.MaxSnapFiles,
 		MaxWALFiles:                cfg.MaxWalFiles,
 		InitialPeerURLsMap:         urlsmap,
 		InitialClusterToken:        token,
 		DiscoveryURL:               cfg.Durl,
 		DiscoveryProxy:             cfg.Dproxy,
 		NewCluster:                 cfg.IsNewCluster(),
 		PeerTLSInfo:                cfg.PeerTLSInfo,
 		TickMs:                     cfg.TickMs,
 		ElectionTicks:              cfg.ElectionTicks(),
 		InitialElectionTickAdvance: cfg.InitialElectionTickAdvance,
 		AutoCompactionRetention:    autoCompactionRetention,
 		AutoCompactionMode:         cfg.AutoCompactionMode,
 		QuotaBackendBytes:          cfg.QuotaBackendBytes,
 		BackendBatchLimit:          cfg.BackendBatchLimit,
 		BackendFreelistType:        backendFreelistType,
 		BackendBatchInterval:       cfg.BackendBatchInterval,
 		MaxTxnOps:                  cfg.MaxTxnOps,
 		MaxRequestBytes:            cfg.MaxRequestBytes,
 		StrictReconfigCheck:        cfg.StrictReconfigCheck,
 		ClientCertAuthEnabled:      cfg.ClientTLSInfo.ClientCertAuth,
 		AuthToken:                  cfg.AuthToken,
 		BcryptCost:                 cfg.BcryptCost,
+		TokenTTL:                   cfg.AuthTokenTTL,
 		CORS:                       cfg.CORS,
 		HostWhitelist:              cfg.HostWhitelist,
 		InitialCorruptCheck:        cfg.ExperimentalInitialCorruptCheck,
 		CorruptCheckTime:           cfg.ExperimentalCorruptCheckTime,
 		PreVote:                    cfg.PreVote,
 		Logger:                     cfg.logger,
 		LoggerConfig:               cfg.loggerConfig,
 		LoggerCore:                 cfg.loggerCore,
 		LoggerWriteSyncer:          cfg.loggerWriteSyncer,
 		Debug:                      cfg.Debug,
 		ForceNewCluster:            cfg.ForceNewCluster,
 		EnableGRPCGateway:          cfg.EnableGRPCGateway,
+		UnsafeNoFsync:              cfg.UnsafeNoFsync,
 		EnableLeaseCheckpoint:      cfg.ExperimentalEnableLeaseCheckpoint,
 		CompactionBatchLimit:       cfg.ExperimentalCompactionBatchLimit,
+		WatchProgressNotifyInterval: cfg.ExperimentalWatchProgressNotifyInterval,
+		WarningApplyDuration:        cfg.ExperimentalWarningApplyDuration,
 	}
 	print(e.cfg.logger, *cfg, srvcfg, memberInitialized)
 	if e.Server, err = etcdserver.NewServer(srvcfg); err != nil {

@@ -811,7 +815,7 @@ func (e *Etcd) GetLogger() *zap.Logger {
 
 func parseCompactionRetention(mode, retention string) (ret time.Duration, err error) {
 	h, err := strconv.Atoi(retention)
-	if err == nil {
+	if err == nil && h >= 0 {
 		switch mode {
 		case CompactorModeRevision:
 			ret = time.Duration(int64(h))
@@ -19,6 +19,7 @@ import (
 	"fmt"
 	"io/ioutil"
 	defaultLog "log"
+	"math"
 	"net"
 	"net/http"
 	"strings"

@@ -229,6 +230,10 @@ func (sctx *serveCtx) registerGateway(opts []grpc.DialOption) (*gw.ServeMux, err
 		addr = fmt.Sprintf("%s://%s", network, addr)
 	}
 
+	opts = append(opts, grpc.WithDefaultCallOptions([]grpc.CallOption{
+		grpc.MaxCallRecvMsgSize(math.MaxInt32),
+	}...))
+
 	conn, err := grpc.DialContext(ctx, addr, opts...)
 	if err != nil {
 		return nil, err

@@ -286,6 +291,7 @@ func (sctx *serveCtx) createMux(gwmux *gw.ServeMux, handler http.Handler) *http.
 					return outgoing
 				},
 			),
+			wsproxy.WithMaxRespBodyBufferSize(0x7fffffff),
 		),
 	)
 }
@@ -311,6 +311,8 @@ func newCheckDatascaleCommand(cmd *cobra.Command, args []string) {
 		ExitWithError(ExitError, errEndpoints)
 	}
 
+	sec := secureCfgFromCmd(cmd)
+
 	ctx, cancel := context.WithCancel(context.Background())
 	resp, err := clients[0].Get(ctx, checkDatascalePrefix, v3.WithPrefix(), v3.WithLimit(1))
 	cancel()

@@ -329,7 +331,7 @@ func newCheckDatascaleCommand(cmd *cobra.Command, args []string) {
 	wg.Add(len(clients))
 
 	// get the process_resident_memory_bytes and process_virtual_memory_bytes before the put operations
-	bytesBefore := endpointMemoryMetrics(eps[0])
+	bytesBefore := endpointMemoryMetrics(eps[0], sec)
 	if bytesBefore == 0 {
 		fmt.Println("FAIL: Could not read process_resident_memory_bytes before the put operations.")
 		os.Exit(ExitError)

@@ -367,7 +369,7 @@ func newCheckDatascaleCommand(cmd *cobra.Command, args []string) {
 	s := <-sc
 
 	// get the process_resident_memory_bytes after the put operations
-	bytesAfter := endpointMemoryMetrics(eps[0])
+	bytesAfter := endpointMemoryMetrics(eps[0], sec)
 	if bytesAfter == 0 {
 		fmt.Println("FAIL: Could not read process_resident_memory_bytes after the put operations.")
 		os.Exit(ExitError)
@@ -16,6 +16,7 @@ package command
 
 import (
 	"context"
+	"crypto/tls"
 	"encoding/hex"
 	"fmt"
 	"io/ioutil"

@@ -90,14 +91,26 @@ func isCommandTimeoutFlagSet(cmd *cobra.Command) bool {
 	return commandTimeoutFlag.Changed
 }
 
-// get the process_resident_memory_bytes from <server:2379>/metrics
-func endpointMemoryMetrics(host string) float64 {
+// get the process_resident_memory_bytes from <server>/metrics
+func endpointMemoryMetrics(host string, scfg *secureCfg) float64 {
 	residentMemoryKey := "process_resident_memory_bytes"
 	var residentMemoryValue string
-	if !strings.HasPrefix(host, `http://`) {
+	if !strings.HasPrefix(host, "http://") && !strings.HasPrefix(host, "https://") {
 		host = "http://" + host
 	}
 	url := host + "/metrics"
+	if strings.HasPrefix(host, "https://") {
+		// load client certificate
+		cert, err := tls.LoadX509KeyPair(scfg.cert, scfg.key)
+		if err != nil {
+			fmt.Println(fmt.Sprintf("client certificate error: %v", err))
+			return 0.0
+		}
+		http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{
+			Certificates:       []tls.Certificate{cert},
+			InsecureSkipVerify: scfg.insecureSkipVerify,
+		}
+	}
 	resp, err := http.Get(url)
 	if err != nil {
 		fmt.Println(fmt.Sprintf("fetch error: %v", err))
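The hunk above teaches `endpointMemoryMetrics` to scrape `/metrics` over HTTPS by installing the client certificate on `http.DefaultTransport`. A hedged alternative sketch of the same idea using a dedicated `http.Client`, so the global transport is left untouched (paths and URL are placeholders):

```go
package main

import (
	"crypto/tls"
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
)

// fetchMetrics downloads a Prometheus /metrics page over TLS with a client cert.
func fetchMetrics(url, certFile, keyFile string, insecureSkipVerify bool) (string, error) {
	cert, err := tls.LoadX509KeyPair(certFile, keyFile)
	if err != nil {
		return "", fmt.Errorf("client certificate error: %v", err)
	}
	client := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{
				Certificates:       []tls.Certificate{cert},
				InsecureSkipVerify: insecureSkipVerify,
			},
		},
	}
	resp, err := client.Get(url)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	return string(body), err
}

func main() {
	out, err := fetchMetrics("https://127.0.0.1:2379/metrics", "client.crt", "client.key", false)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(out), "bytes of metrics fetched")
}
```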
@@ -60,7 +60,7 @@ func init() {
 	// TODO: secure by default when etcd enables secure gRPC by default.
 	rootCmd.PersistentFlags().BoolVar(&globalFlags.Insecure, "insecure-transport", true, "disable transport security for client connections")
 	rootCmd.PersistentFlags().BoolVar(&globalFlags.InsecureDiscovery, "insecure-discovery", true, "accept insecure SRV records describing cluster endpoints")
-	rootCmd.PersistentFlags().BoolVar(&globalFlags.InsecureSkipVerify, "insecure-skip-tls-verify", false, "skip server certificate verification")
+	rootCmd.PersistentFlags().BoolVar(&globalFlags.InsecureSkipVerify, "insecure-skip-tls-verify", false, "skip server certificate verification (CAUTION: this option should be enabled only for testing purposes)")
 	rootCmd.PersistentFlags().StringVar(&globalFlags.TLS.CertFile, "cert", "", "identify secure client using this TLS certificate file")
 	rootCmd.PersistentFlags().StringVar(&globalFlags.TLS.KeyFile, "key", "", "identify secure client using this TLS key file")
 	rootCmd.PersistentFlags().StringVar(&globalFlags.TLS.TrustedCAFile, "cacert", "", "verify certificates of TLS-enabled secure servers using this CA bundle")
@@ -245,6 +245,7 @@ func newConfig() *config {
 	// auth
 	fs.StringVar(&cfg.ec.AuthToken, "auth-token", cfg.ec.AuthToken, "Specify auth token specific options.")
 	fs.UintVar(&cfg.ec.BcryptCost, "bcrypt-cost", cfg.ec.BcryptCost, "Specify bcrypt algorithm cost factor for auth password hashing.")
+	fs.UintVar(&cfg.ec.AuthTokenTTL, "auth-token-ttl", cfg.ec.AuthTokenTTL, "The lifetime in seconds of the auth token.")
 
 	// gateway
 	fs.BoolVar(&cfg.ec.EnableGRPCGateway, "enable-grpc-gateway", true, "Enable GRPC gateway.")

@@ -256,8 +257,11 @@ func newConfig() *config {
 	fs.StringVar(&cfg.ec.ExperimentalBackendFreelistType, "experimental-backend-bbolt-freelist-type", cfg.ec.ExperimentalBackendFreelistType, "ExperimentalBackendFreelistType specifies the type of freelist that boltdb backend uses(array and map are supported types)")
 	fs.BoolVar(&cfg.ec.ExperimentalEnableLeaseCheckpoint, "experimental-enable-lease-checkpoint", false, "Enable to persist lease remaining TTL to prevent indefinite auto-renewal of long lived leases.")
 	fs.IntVar(&cfg.ec.ExperimentalCompactionBatchLimit, "experimental-compaction-batch-limit", cfg.ec.ExperimentalCompactionBatchLimit, "Sets the maximum revisions deleted in each compaction batch.")
+	fs.DurationVar(&cfg.ec.ExperimentalWatchProgressNotifyInterval, "experimental-watch-progress-notify-interval", cfg.ec.ExperimentalWatchProgressNotifyInterval, "Duration of periodic watch progress notifications.")
+	fs.DurationVar(&cfg.ec.ExperimentalWarningApplyDuration, "experimental-warning-apply-duration", cfg.ec.ExperimentalWarningApplyDuration, "Time duration after which a warning is generated if request takes more time.")
 
 	// unsafe
+	fs.BoolVar(&cfg.ec.UnsafeNoFsync, "unsafe-no-fsync", false, "Disables fsync, unsafe, will cause data loss.")
 	fs.BoolVar(&cfg.ec.ForceNewCluster, "force-new-cluster", false, "Force to create a new one member cluster.")
 
 	// ignored
@@ -358,7 +358,7 @@ func startProxy(cfg *config) error {
 	}
 
 	cfg.ec.Dir = filepath.Join(cfg.ec.Dir, "proxy")
-	err = os.MkdirAll(cfg.ec.Dir, fileutil.PrivateDirMode)
+	err = fileutil.TouchDirAll(cfg.ec.Dir)
 	if err != nil {
 		return err
 	}
@@ -71,7 +71,7 @@ func newGatewayStartCommand() *cobra.Command {
 	cmd.Flags().StringVar(&gatewayDNSCluster, "discovery-srv", "", "DNS domain used to bootstrap initial cluster")
 	cmd.Flags().StringVar(&gatewayDNSClusterServiceName, "discovery-srv-name", "", "service name to query when using DNS discovery")
 	cmd.Flags().BoolVar(&gatewayInsecureDiscovery, "insecure-discovery", false, "accept insecure SRV records")
-	cmd.Flags().StringVar(&gatewayCA, "trusted-ca-file", "", "path to the client server TLS CA file.")
+	cmd.Flags().StringVar(&gatewayCA, "trusted-ca-file", "", "path to the client server TLS CA file for verifying the discovered endpoints when discovery-srv is provided.")
 
 	cmd.Flags().StringSliceVar(&gatewayEndpoints, "endpoints", []string{"127.0.0.1:2379"}, "comma separated etcd cluster endpoints")
 
@@ -119,6 +119,41 @@ func startGateway(cmd *cobra.Command, args []string) {
|
||||
}
|
||||
}
|
||||
|
||||
lhost, lport, err := net.SplitHostPort(gatewayListenAddr)
|
||||
if err != nil {
|
||||
fmt.Println("failed to validate listen address:", gatewayListenAddr)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
laddrs, err := net.LookupHost(lhost)
|
||||
if err != nil {
|
||||
fmt.Println("failed to resolve listen host:", lhost)
|
||||
os.Exit(1)
|
||||
}
|
||||
laddrsMap := make(map[string]bool)
|
||||
for _, addr := range laddrs {
|
||||
laddrsMap[addr] = true
|
||||
}
|
||||
|
||||
for _, srv := range srvs.SRVs {
|
||||
var eaddrs []string
|
||||
eaddrs, err = net.LookupHost(srv.Target)
|
||||
if err != nil {
|
||||
fmt.Println("failed to resolve endpoint host:", srv.Target)
|
||||
os.Exit(1)
|
||||
}
|
||||
if fmt.Sprintf("%d", srv.Port) != lport {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, ea := range eaddrs {
|
||||
if laddrsMap[ea] {
|
||||
fmt.Printf("SRV or endpoint (%s:%d->%s:%d) should not resolve to the gateway listen addr (%s)\n", srv.Target, srv.Port, ea, srv.Port, gatewayListenAddr)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(srvs.Endpoints) == 0 {
|
||||
fmt.Println("no endpoints found")
|
||||
os.Exit(1)
|
||||
|
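The gateway change above refuses to start when a discovered SRV endpoint resolves back to the gateway's own listen address, which would make the gateway proxy traffic to itself. A minimal sketch of that check, with illustrative host names and port rather than real cluster values:

// Sketch of the self-address check: resolve the listen host and an endpoint
// host, and report a conflict when they share an IP and port.
package main

import (
    "fmt"
    "net"
    "os"
)

func conflicts(listenAddr, endpointHost, endpointPort string) (bool, error) {
    lhost, lport, err := net.SplitHostPort(listenAddr)
    if err != nil {
        return false, err
    }
    laddrs, err := net.LookupHost(lhost)
    if err != nil {
        return false, err
    }
    listen := make(map[string]bool, len(laddrs))
    for _, a := range laddrs {
        listen[a] = true
    }
    if endpointPort != lport {
        return false, nil
    }
    eaddrs, err := net.LookupHost(endpointHost)
    if err != nil {
        return false, err
    }
    for _, a := range eaddrs {
        if listen[a] {
            return true, nil
        }
    }
    return false, nil
}

func main() {
    bad, err := conflicts("127.0.0.1:23790", "localhost", "23790")
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
    fmt.Println("endpoint resolves to the gateway listen address:", bad)
}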
@@ -131,7 +131,7 @@ func newGRPCProxyStartCommand() *cobra.Command {
|
||||
cmd.Flags().StringVar(&grpcProxyCert, "cert", "", "identify secure connections with etcd servers using this TLS certificate file")
|
||||
cmd.Flags().StringVar(&grpcProxyKey, "key", "", "identify secure connections with etcd servers using this TLS key file")
|
||||
cmd.Flags().StringVar(&grpcProxyCA, "cacert", "", "verify certificates of TLS-enabled secure etcd servers using this CA bundle")
|
||||
cmd.Flags().BoolVar(&grpcProxyInsecureSkipTLSVerify, "insecure-skip-tls-verify", false, "skip authentication of etcd server TLS certificates")
|
||||
cmd.Flags().BoolVar(&grpcProxyInsecureSkipTLSVerify, "insecure-skip-tls-verify", false, "skip authentication of etcd server TLS certificates (CAUTION: this option should be enabled only for testing purposes)")
|
||||
|
||||
// client TLS for connecting to proxy
|
||||
cmd.Flags().StringVar(&grpcProxyListenCert, "cert-file", "", "identify secure connections to the proxy using this TLS certificate file")
|
||||
@@ -286,6 +286,9 @@ func newClientCfg(lg *zap.Logger, eps []string) (*clientv3.Config, error) {
|
||||
return nil, err
|
||||
}
|
||||
clientTLS.InsecureSkipVerify = grpcProxyInsecureSkipTLSVerify
|
||||
if clientTLS.InsecureSkipVerify {
|
||||
lg.Warn("--insecure-skip-tls-verify was given, this grpc proxy process skips authentication of etcd server TLS certificates. This option should be enabled only for testing purposes.")
|
||||
}
|
||||
cfg.TLS = clientTLS
|
||||
lg.Info("gRPC proxy client TLS", zap.String("tls-info", fmt.Sprintf("%+v", tls)))
|
||||
}
|
||||
@@ -323,7 +326,7 @@ func mustListenCMux(lg *zap.Logger, tlsinfo *transport.TLSInfo) cmux.CMux {
|
||||
|
||||
func newGRPCProxyServer(lg *zap.Logger, client *clientv3.Client) *grpc.Server {
|
||||
if grpcProxyEnableOrdering {
|
||||
vf := ordering.NewOrderViolationSwitchEndpointClosure(*client)
|
||||
vf := ordering.NewOrderViolationSwitchEndpointClosure(client)
|
||||
client.KV = ordering.NewKV(client.KV, vf)
|
||||
lg.Info("waiting for linearized read from cluster to recover ordering")
|
||||
for {
|
||||
|
@@ -162,6 +162,8 @@ Auth:
|
||||
Specify a v3 authentication token type and its options ('simple' or 'jwt').
|
||||
--bcrypt-cost ` + fmt.Sprintf("%d", bcrypt.DefaultCost) + `
|
||||
Specify the cost / strength of the bcrypt algorithm for hashing auth passwords. Valid values are between ` + fmt.Sprintf("%d", bcrypt.MinCost) + ` and ` + fmt.Sprintf("%d", bcrypt.MaxCost) + `.
|
||||
--auth-token-ttl 300
|
||||
The lifetime in seconds of the auth token.
|
||||
|
||||
Profiling and Monitoring:
|
||||
--enable-pprof 'false'
|
||||
@@ -208,6 +210,10 @@ Experimental feature:
|
||||
ExperimentalCompactionBatchLimit sets the maximum revisions deleted in each compaction batch.
|
||||
--experimental-peer-skip-client-san-verification 'false'
|
||||
Skip verification of SAN field in client certificate for peer connections.
|
||||
--experimental-watch-progress-notify-interval '10m'
|
||||
Duration of periodical watch progress notification.
|
||||
--experimental-warning-apply-duration '100ms'
|
||||
Warning is generated if requests take more than this duration.
|
||||
|
||||
Unsafe feature:
|
||||
--force-new-cluster 'false'
|
||||
|
@@ -36,7 +36,7 @@ const (
|
||||
// HandleMetricsHealth registers metrics and health handlers.
|
||||
func HandleMetricsHealth(mux *http.ServeMux, srv etcdserver.ServerV2) {
|
||||
mux.Handle(PathMetrics, promhttp.Handler())
|
||||
mux.Handle(PathHealth, NewHealthHandler(func() Health { return checkHealth(srv) }))
|
||||
mux.Handle(PathHealth, NewHealthHandler(func(excludedAlarms AlarmSet) Health { return checkHealth(srv, excludedAlarms) }))
|
||||
}
|
||||
|
||||
// HandlePrometheus registers prometheus handler on '/metrics'.
|
||||
@@ -45,7 +45,7 @@ func HandlePrometheus(mux *http.ServeMux) {
|
||||
}
|
||||
|
||||
// NewHealthHandler handles '/health' requests.
|
||||
func NewHealthHandler(hfunc func() Health) http.HandlerFunc {
|
||||
func NewHealthHandler(hfunc func(excludedAlarms AlarmSet) Health) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.Method != http.MethodGet {
|
||||
w.Header().Set("Allow", http.MethodGet)
|
||||
@@ -53,7 +53,8 @@ func NewHealthHandler(hfunc func() Health) http.HandlerFunc {
|
||||
plog.Warningf("/health error (status code %d)", http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
h := hfunc()
|
||||
excludedAlarms := getExcludedAlarms(r)
|
||||
h := hfunc(excludedAlarms)
|
||||
d, _ := json.Marshal(h)
|
||||
if h.Health != "true" {
|
||||
http.Error(w, string(d), http.StatusServiceUnavailable)
|
||||
@@ -90,19 +91,46 @@ type Health struct {
|
||||
Health string `json:"health"`
|
||||
}

type AlarmSet map[string]struct{}

func getExcludedAlarms(r *http.Request) (alarms AlarmSet) {
alarms = make(map[string]struct{}, 2)
alms, found := r.URL.Query()["exclude"]
if found {
for _, alm := range alms {
if len(alms) == 0 {
continue
}
alarms[alm] = struct{}{}
}
}
return alarms
}
|
||||
// TODO: server NOSPACE, etcdserver.ErrNoLeader in health API
|
||||
|
||||
func checkHealth(srv etcdserver.ServerV2) Health {
|
||||
func checkHealth(srv etcdserver.ServerV2, excludedAlarms AlarmSet) Health {
|
||||
h := Health{Health: "true"}
|
||||
|
||||
as := srv.Alarms()
|
||||
if len(as) > 0 {
|
||||
h.Health = "false"
|
||||
for _, v := range as {
|
||||
plog.Warningf("/health error due to an alarm %s", v.String())
|
||||
alarmName := v.Alarm.String()
|
||||
if _, found := excludedAlarms[alarmName]; found {
|
||||
plog.Debugf("/health excluded alarm %s", alarmName)
|
||||
delete(excludedAlarms, alarmName)
|
||||
continue
|
||||
}
|
||||
h.Health = "false"
|
||||
plog.Warningf("/health error due to %s", v.String())
|
||||
return h
|
||||
}
|
||||
}
|
||||
|
||||
if len(excludedAlarms) > 0 {
|
||||
plog.Warningf("fail exclude alarms from health check, exclude alarms %+v", excludedAlarms)
|
||||
}
|
||||
|
||||
if h.Health == "true" {
|
||||
if uint64(srv.Leader()) == raft.None {
|
||||
h.Health = "false"
|
||||
@@ -122,7 +150,7 @@ func checkHealth(srv etcdserver.ServerV2) Health {
|
||||
|
||||
if h.Health == "true" {
|
||||
healthSuccess.Inc()
|
||||
plog.Infof("/health OK (status code %d)", http.StatusOK)
|
||||
plog.Debugf("/health OK (status code %d)", http.StatusOK)
|
||||
} else {
|
||||
healthFailed.Inc()
|
||||
}
|
||||
|
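With the change above, a caller can ask /health to ignore specific alarms via the exclude query parameter. A hedged client-side sketch of such a probe follows; the endpoint URL and the NOSPACE alarm name are placeholders for whatever a real deployment would use:

// Client-side sketch: probe a health endpoint and treat a NOSPACE alarm as
// non-fatal by passing the exclude parameter introduced above.
package main

import (
    "encoding/json"
    "fmt"
    "net/http"
    "os"
)

type health struct {
    Health string `json:"health"`
}

func main() {
    resp, err := http.Get("http://127.0.0.1:2379/health?exclude=NOSPACE")
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
    defer resp.Body.Close()

    var h health
    if err := json.NewDecoder(resp.Body).Decode(&h); err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
    fmt.Printf("status=%d health=%s\n", resp.StatusCode, h.Health)
}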
151
etcdserver/api/etcdhttp/metrics_test.go
Normal file
@@ -0,0 +1,151 @@
|
||||
// Copyright 2021 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package etcdhttp
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"go.etcd.io/etcd/etcdserver"
|
||||
stats "go.etcd.io/etcd/etcdserver/api/v2stats"
|
||||
pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
|
||||
"go.etcd.io/etcd/pkg/testutil"
|
||||
"go.etcd.io/etcd/pkg/types"
|
||||
"go.etcd.io/etcd/raft"
|
||||
)
|
||||
|
||||
type fakeStats struct{}
|
||||
|
||||
func (s *fakeStats) SelfStats() []byte { return nil }
|
||||
func (s *fakeStats) LeaderStats() []byte { return nil }
|
||||
func (s *fakeStats) StoreStats() []byte { return nil }
|
||||
|
||||
type fakeServerV2 struct {
|
||||
fakeServer
|
||||
stats.Stats
|
||||
health string
|
||||
}
|
||||
|
||||
func (s *fakeServerV2) Leader() types.ID {
|
||||
if s.health == "true" {
|
||||
return 1
|
||||
}
|
||||
return types.ID(raft.None)
|
||||
}
|
||||
func (s *fakeServerV2) Do(ctx context.Context, r pb.Request) (etcdserver.Response, error) {
|
||||
if s.health == "true" {
|
||||
return etcdserver.Response{}, nil
|
||||
}
|
||||
return etcdserver.Response{}, fmt.Errorf("fail health check")
|
||||
}
|
||||
func (s *fakeServerV2) ClientCertAuthEnabled() bool { return false }
|
||||
|
||||
func TestHealthHandler(t *testing.T) {
|
||||
// define the input and expected output
|
||||
// input: alarms, and healthCheckURL
|
||||
tests := []struct {
|
||||
alarms []*pb.AlarmMember
|
||||
healthCheckURL string
|
||||
statusCode int
|
||||
health string
|
||||
}{
|
||||
{
|
||||
[]*pb.AlarmMember{},
|
||||
"/health",
|
||||
http.StatusOK,
|
||||
"true",
|
||||
},
|
||||
{
|
||||
[]*pb.AlarmMember{{MemberID: uint64(0), Alarm: pb.AlarmType_NOSPACE}},
|
||||
"/health",
|
||||
http.StatusServiceUnavailable,
|
||||
"false",
|
||||
},
|
||||
{
|
||||
[]*pb.AlarmMember{{MemberID: uint64(0), Alarm: pb.AlarmType_NOSPACE}},
|
||||
"/health?exclude=NOSPACE",
|
||||
http.StatusOK,
|
||||
"true",
|
||||
},
|
||||
{
|
||||
[]*pb.AlarmMember{},
|
||||
"/health?exclude=NOSPACE",
|
||||
http.StatusOK,
|
||||
"true",
|
||||
},
|
||||
{
|
||||
[]*pb.AlarmMember{{MemberID: uint64(0), Alarm: pb.AlarmType_NOSPACE}, {MemberID: uint64(1), Alarm: pb.AlarmType_CORRUPT}},
|
||||
"/health?exclude=NOSPACE",
|
||||
http.StatusServiceUnavailable,
|
||||
"false",
|
||||
},
|
||||
{
|
||||
[]*pb.AlarmMember{{MemberID: uint64(0), Alarm: pb.AlarmType_NOSPACE}, {MemberID: uint64(1), Alarm: pb.AlarmType_CORRUPT}},
|
||||
"/health?exclude=NOSPACE&exclude=CORRUPT",
|
||||
http.StatusOK,
|
||||
"true",
|
||||
},
|
||||
}
|
||||
|
||||
for i, tt := range tests {
|
||||
func() {
|
||||
mux := http.NewServeMux()
|
||||
HandleMetricsHealth(mux, &fakeServerV2{
|
||||
fakeServer: fakeServer{alarms: tt.alarms},
|
||||
Stats: &fakeStats{},
|
||||
health: tt.health,
|
||||
})
|
||||
ts := httptest.NewServer(mux)
|
||||
defer ts.Close()
|
||||
|
||||
res, err := ts.Client().Do(&http.Request{Method: http.MethodGet, URL: testutil.MustNewURL(t, ts.URL+tt.healthCheckURL)})
|
||||
if err != nil {
|
||||
t.Errorf("fail serve http request %s %v in test case #%d", tt.healthCheckURL, err, i+1)
|
||||
}
|
||||
if res == nil {
|
||||
t.Errorf("got nil http response with http request %s in test case #%d", tt.healthCheckURL, i+1)
|
||||
return
|
||||
}
|
||||
if res.StatusCode != tt.statusCode {
|
||||
t.Errorf("want statusCode %d but got %d in test case #%d", tt.statusCode, res.StatusCode, i+1)
|
||||
}
|
||||
health, err := parseHealthOutput(res.Body)
|
||||
if err != nil {
|
||||
t.Errorf("fail parse health check output %v", err)
|
||||
}
|
||||
if health.Health != tt.health {
|
||||
t.Errorf("want health %s but got %s", tt.health, health.Health)
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
func parseHealthOutput(body io.Reader) (Health, error) {
|
||||
obj := Health{}
|
||||
d, derr := ioutil.ReadAll(body)
|
||||
if derr != nil {
|
||||
return obj, derr
|
||||
}
|
||||
if err := json.Unmarshal(d, &obj); err != nil {
|
||||
return obj, err
|
||||
}
|
||||
return obj, nil
|
||||
}
|
@@ -58,6 +58,7 @@ func (c *fakeCluster) Version() *semver.Version { return nil }
|
||||
|
||||
type fakeServer struct {
|
||||
cluster api.Cluster
|
||||
alarms []*pb.AlarmMember
|
||||
}
|
||||
|
||||
func (s *fakeServer) AddMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) {
|
||||
@@ -74,7 +75,7 @@ func (s *fakeServer) PromoteMember(ctx context.Context, id uint64) ([]*membershi
|
||||
}
|
||||
func (s *fakeServer) ClusterVersion() *semver.Version { return nil }
|
||||
func (s *fakeServer) Cluster() api.Cluster { return s.cluster }
|
||||
func (s *fakeServer) Alarms() []*pb.AlarmMember { return nil }
|
||||
func (s *fakeServer) Alarms() []*pb.AlarmMember { return s.alarms }
|
||||
|
||||
var fakeRaftHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Write([]byte("test data"))
|
||||
|
@@ -763,16 +763,21 @@ func ValidateClusterAndAssignIDs(lg *zap.Logger, local *RaftCluster, existing *R
|
||||
if len(ems) != len(lms) {
|
||||
return fmt.Errorf("member count is unequal")
|
||||
}
|
||||
sort.Sort(MembersByPeerURLs(ems))
|
||||
sort.Sort(MembersByPeerURLs(lms))
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second)
|
||||
defer cancel()
|
||||
for i := range ems {
|
||||
if ok, err := netutil.URLStringsEqual(ctx, lg, ems[i].PeerURLs, lms[i].PeerURLs); !ok {
|
||||
return fmt.Errorf("unmatched member while checking PeerURLs (%v)", err)
|
||||
var err error
|
||||
ok := false
|
||||
for j := range lms {
|
||||
if ok, err = netutil.URLStringsEqual(ctx, lg, ems[i].PeerURLs, lms[j].PeerURLs); ok {
|
||||
lms[j].ID = ems[i].ID
|
||||
break
|
||||
}
|
||||
}
|
||||
if !ok {
|
||||
return fmt.Errorf("PeerURLs: no match found for existing member (%v, %v), last resolver error (%v)", ems[i].ID, ems[i].PeerURLs, err)
|
||||
}
|
||||
lms[i].ID = ems[i].ID
|
||||
}
|
||||
local.members = make(map[types.ID]*Member)
|
||||
for _, m := range lms {
|
||||
|
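The hunk above stops assuming that the existing and local member lists sort into the same order and instead searches for a matching peer-URL set for each existing member before copying its ID. A standalone sketch of that order-insensitive matching, using plain string-set equality in place of netutil.URLStringsEqual (which also resolves hostnames):

// Sketch: for every "existing" member, scan all "local" members for one whose
// peer URL set matches, then assign the existing ID to it.
package main

import (
    "fmt"
    "sort"
    "strings"
)

type member struct {
    ID       uint64
    PeerURLs []string
}

func sameURLs(a, b []string) bool {
    if len(a) != len(b) {
        return false
    }
    as, bs := append([]string(nil), a...), append([]string(nil), b...)
    sort.Strings(as)
    sort.Strings(bs)
    return strings.Join(as, ",") == strings.Join(bs, ",")
}

func assignIDs(existing, local []member) error {
    for i := range existing {
        matched := false
        for j := range local {
            if sameURLs(existing[i].PeerURLs, local[j].PeerURLs) {
                local[j].ID = existing[i].ID
                matched = true
                break
            }
        }
        if !matched {
            return fmt.Errorf("no local member matches peer URLs %v", existing[i].PeerURLs)
        }
    }
    return nil
}

func main() {
    existing := []member{{ID: 1, PeerURLs: []string{"http://10.0.0.1:2380"}}, {ID: 2, PeerURLs: []string{"http://10.0.0.2:2380"}}}
    local := []member{{PeerURLs: []string{"http://10.0.0.2:2380"}}, {PeerURLs: []string{"http://10.0.0.1:2380"}}}
    fmt.Println(assignIDs(existing, local), local)
}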
@@ -218,7 +218,7 @@ func (d *discovery) createSelf(contents string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
func (d *discovery) checkCluster() ([]*client.Node, int, uint64, error) {
|
||||
func (d *discovery) checkCluster() ([]*client.Node, uint64, uint64, error) {
|
||||
configKey := path.Join("/", d.cluster, "_config")
|
||||
ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
|
||||
// find cluster size
|
||||
@@ -247,7 +247,7 @@ func (d *discovery) checkCluster() ([]*client.Node, int, uint64, error) {
|
||||
}
|
||||
return nil, 0, 0, err
|
||||
}
|
||||
size, err := strconv.Atoi(resp.Node.Value)
|
||||
size, err := strconv.ParseUint(resp.Node.Value, 10, 0)
|
||||
if err != nil {
|
||||
return nil, 0, 0, ErrBadSizeKey
|
||||
}
|
||||
@@ -288,7 +288,7 @@ func (d *discovery) checkCluster() ([]*client.Node, int, uint64, error) {
|
||||
if path.Base(nodes[i].Key) == path.Base(d.selfKey()) {
|
||||
break
|
||||
}
|
||||
if i >= size-1 {
|
||||
if uint64(i) >= size-1 {
|
||||
return nodes[:size], size, resp.Index, ErrFullCluster
|
||||
}
|
||||
}
|
||||
@@ -316,7 +316,7 @@ func (d *discovery) logAndBackoffForRetry(step string) {
|
||||
d.clock.Sleep(retryTimeInSecond)
|
||||
}
|
||||
|
||||
func (d *discovery) checkClusterRetry() ([]*client.Node, int, uint64, error) {
|
||||
func (d *discovery) checkClusterRetry() ([]*client.Node, uint64, uint64, error) {
|
||||
if d.retries < nRetries {
|
||||
d.logAndBackoffForRetry("cluster status check")
|
||||
return d.checkCluster()
|
||||
@@ -336,8 +336,8 @@ func (d *discovery) waitNodesRetry() ([]*client.Node, error) {
|
||||
return nil, ErrTooManyRetries
|
||||
}
|
||||
|
||||
func (d *discovery) waitNodes(nodes []*client.Node, size int, index uint64) ([]*client.Node, error) {
|
||||
if len(nodes) > size {
|
||||
func (d *discovery) waitNodes(nodes []*client.Node, size uint64, index uint64) ([]*client.Node, error) {
|
||||
if uint64(len(nodes)) > size {
|
||||
nodes = nodes[:size]
|
||||
}
|
||||
// watch from the next index
|
||||
@@ -369,16 +369,16 @@ func (d *discovery) waitNodes(nodes []*client.Node, size int, index uint64) ([]*
|
||||
}
|
||||
|
||||
// wait for others
|
||||
for len(all) < size {
|
||||
for uint64(len(all)) < size {
|
||||
if d.lg != nil {
|
||||
d.lg.Info(
|
||||
"found peers from discovery server; waiting for more",
|
||||
zap.String("discovery-url", d.url.String()),
|
||||
zap.Int("found-peers", len(all)),
|
||||
zap.Int("needed-peers", size-len(all)),
|
||||
zap.Int("needed-peers", int(size-uint64(len(all)))),
|
||||
)
|
||||
} else {
|
||||
plog.Noticef("found %d peer(s), waiting for %d more", len(all), size-len(all))
|
||||
plog.Noticef("found %d peer(s), waiting for %d more", len(all), size-uint64(len(all)))
|
||||
}
|
||||
resp, err := w.Next(context.Background())
|
||||
if err != nil {
|
||||
@@ -415,7 +415,7 @@ func (d *discovery) selfKey() string {
|
||||
return path.Join("/", d.cluster, d.id.String())
|
||||
}
|
||||
|
||||
func nodesToCluster(ns []*client.Node, size int) (string, error) {
|
||||
func nodesToCluster(ns []*client.Node, size uint64) (string, error) {
|
||||
s := make([]string, len(ns))
|
||||
for i, n := range ns {
|
||||
s[i] = n.Value
|
||||
@@ -425,7 +425,7 @@ func nodesToCluster(ns []*client.Node, size int) (string, error) {
|
||||
if err != nil {
|
||||
return us, ErrInvalidURL
|
||||
}
|
||||
if m.Len() != size {
|
||||
if uint64(m.Len()) != size {
|
||||
return us, ErrDuplicateName
|
||||
}
|
||||
return us, nil
|
||||
|
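The discovery changes above switch the cluster size from int to uint64 and parse it with strconv.ParseUint, which rejects negative values that strconv.Atoi would have accepted. A small sketch of the difference:

// Sketch of the size-parsing change: ParseUint rejects negative input outright,
// so no extra sign check is needed after parsing.
package main

import (
    "fmt"
    "strconv"
)

func main() {
    for _, raw := range []string{"3", "-3", "abc"} {
        size, err := strconv.ParseUint(raw, 10, 0)
        if err != nil {
            fmt.Printf("%q: rejected (%v)\n", raw, err)
            continue
        }
        fmt.Printf("%q: cluster size %d\n", raw, size)
    }
}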
@@ -217,7 +217,7 @@ func TestCheckCluster(t *testing.T) {
|
||||
if reflect.DeepEqual(ns, tt.nodes) {
|
||||
t.Errorf("#%d: nodes = %v, want %v", i, ns, tt.nodes)
|
||||
}
|
||||
if size != tt.wsize {
|
||||
if size != uint64(tt.wsize) {
|
||||
t.Errorf("#%d: size = %v, want %d", i, size, tt.wsize)
|
||||
}
|
||||
if index != tt.index {
|
||||
@@ -301,7 +301,7 @@ func TestWaitNodes(t *testing.T) {
|
||||
fc.Advance(time.Second * (0x1 << i))
|
||||
}
|
||||
}()
|
||||
g, err := d.waitNodes(tt.nodes, 3, 0) // we do not care about index in this test
|
||||
g, err := d.waitNodes(tt.nodes, uint64(3), 0) // we do not care about index in this test
|
||||
if err != nil {
|
||||
t.Errorf("#%d: err = %v, want %v", i, err, nil)
|
||||
}
|
||||
@@ -348,7 +348,7 @@ func TestCreateSelf(t *testing.T) {
|
||||
func TestNodesToCluster(t *testing.T) {
|
||||
tests := []struct {
|
||||
nodes []*client.Node
|
||||
size int
|
||||
size uint64
|
||||
wcluster string
|
||||
werr error
|
||||
}{
|
||||
|
@@ -844,7 +844,7 @@ func TestStoreWatchSlowConsumer(t *testing.T) {
|
||||
s.Watch("/foo", true, true, 0) // stream must be true
|
||||
// Fill watch channel with 100 events
|
||||
for i := 1; i <= 100; i++ {
|
||||
s.Set("/foo", false, string(i), v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) // ok
|
||||
s.Set("/foo", false, string(rune(i)), v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) // ok
|
||||
}
|
||||
// testutil.AssertEqual(t, s.WatcherHub.count, int64(1))
|
||||
s.Set("/foo", false, "101", v2store.TTLOptionSet{ExpireTime: v2store.Permanent}) // ok
|
||||
|
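The test fix above replaces string(i) with string(rune(i)): converting an integer to a string yields the rune with that code point, not its decimal text, and newer Go versions flag the direct int conversion. A two-line illustration:

// Sketch of why string(i) was flagged for an int value.
package main

import (
    "fmt"
    "strconv"
)

func main() {
    i := 65
    fmt.Println(string(rune(i))) // prints "A": the rune with code point 65
    fmt.Println(strconv.Itoa(i)) // prints "65": the decimal text
}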
@@ -217,8 +217,8 @@ func newStreamInterceptor(s *etcdserver.EtcdServer) grpc.StreamServerInterceptor
|
||||
return rpctypes.ErrGRPCNoLeader
|
||||
}
|
||||
|
||||
cctx, cancel := context.WithCancel(ss.Context())
|
||||
ss = serverStreamWithCtx{ctx: cctx, cancel: &cancel, ServerStream: ss}
|
||||
ctx := newCancellableContext(ss.Context())
|
||||
ss = serverStreamWithCtx{ctx: ctx, ServerStream: ss}
|
||||
|
||||
smap.mu.Lock()
|
||||
smap.streams[ss] = struct{}{}
|
||||
@@ -228,7 +228,8 @@ func newStreamInterceptor(s *etcdserver.EtcdServer) grpc.StreamServerInterceptor
|
||||
smap.mu.Lock()
|
||||
delete(smap.streams, ss)
|
||||
smap.mu.Unlock()
|
||||
cancel()
|
||||
// TODO: investigate whether the reason for cancellation here is useful to know
|
||||
ctx.Cancel(nil)
|
||||
}()
|
||||
}
|
||||
}
|
||||
@@ -237,10 +238,52 @@ func newStreamInterceptor(s *etcdserver.EtcdServer) grpc.StreamServerInterceptor
|
||||
}
|
||||
}
|
||||
|
||||
// cancellableContext wraps a context with new cancellable context that allows a
|
||||
// specific cancellation error to be preserved and later retrieved using the
|
||||
// Context.Err() function. This is so downstream context users can disambiguate
|
||||
// the reason for the cancellation which could be from the client (for example)
|
||||
// or from this interceptor code.
|
||||
type cancellableContext struct {
|
||||
context.Context
|
||||
|
||||
lock sync.RWMutex
|
||||
cancel context.CancelFunc
|
||||
cancelReason error
|
||||
}
|
||||
|
||||
func newCancellableContext(parent context.Context) *cancellableContext {
|
||||
ctx, cancel := context.WithCancel(parent)
|
||||
return &cancellableContext{
|
||||
Context: ctx,
|
||||
cancel: cancel,
|
||||
}
|
||||
}
|
||||
|
||||
// Cancel stores the cancellation reason and then delegates to context.WithCancel
|
||||
// against the parent context.
|
||||
func (c *cancellableContext) Cancel(reason error) {
|
||||
c.lock.Lock()
|
||||
c.cancelReason = reason
|
||||
c.lock.Unlock()
|
||||
c.cancel()
|
||||
}
|
||||
|
||||
// Err will return the preserved cancel reason error if present, and will
|
||||
// otherwise return the underlying error from the parent context.
|
||||
func (c *cancellableContext) Err() error {
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
if c.cancelReason != nil {
|
||||
return c.cancelReason
|
||||
}
|
||||
return c.Context.Err()
|
||||
}
|
||||
|
||||
type serverStreamWithCtx struct {
|
||||
grpc.ServerStream
|
||||
ctx context.Context
|
||||
cancel *context.CancelFunc
|
||||
|
||||
// ctx is used so that we can preserve a reason for cancellation.
|
||||
ctx *cancellableContext
|
||||
}
|
||||
|
||||
func (ssc serverStreamWithCtx) Context() context.Context { return ssc.ctx }
|
||||
@@ -272,7 +315,7 @@ func monitorLeader(s *etcdserver.EtcdServer) *streamsMap {
|
||||
smap.mu.Lock()
|
||||
for ss := range smap.streams {
|
||||
if ssWithCtx, ok := ss.(serverStreamWithCtx); ok {
|
||||
(*ssWithCtx.cancel)()
|
||||
ssWithCtx.ctx.Cancel(rpctypes.ErrGRPCNoLeader)
|
||||
<-ss.Context().Done()
|
||||
}
|
||||
}
|
||||
|
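The cancellableContext added above preserves a cancellation reason so downstream readers of ctx.Err() see the specific error instead of the generic context.Canceled. A compact standalone sketch of the same pattern; the type and error names here are illustrative:

// Sketch of the cancellation-reason pattern: Err() reports the stored reason
// instead of the generic context.Canceled.
package main

import (
    "context"
    "errors"
    "fmt"
    "sync"
)

type reasonCtx struct {
    context.Context
    mu     sync.RWMutex
    cancel context.CancelFunc
    reason error
}

func withReason(parent context.Context) *reasonCtx {
    ctx, cancel := context.WithCancel(parent)
    return &reasonCtx{Context: ctx, cancel: cancel}
}

func (c *reasonCtx) Cancel(reason error) {
    c.mu.Lock()
    c.reason = reason
    c.mu.Unlock()
    c.cancel()
}

func (c *reasonCtx) Err() error {
    c.mu.RLock()
    defer c.mu.RUnlock()
    if c.reason != nil {
        return c.reason
    }
    return c.Context.Err()
}

func main() {
    errNoLeader := errors.New("no leader")
    ctx := withReason(context.Background())
    ctx.Cancel(errNoLeader)
    <-ctx.Done()
    fmt.Println(ctx.Err()) // "no leader" rather than "context canceled"
}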
@@ -35,6 +35,8 @@ var (
|
||||
ErrGRPCLeaseExist = status.New(codes.FailedPrecondition, "etcdserver: lease already exists").Err()
|
||||
ErrGRPCLeaseTTLTooLarge = status.New(codes.OutOfRange, "etcdserver: too large lease TTL").Err()
|
||||
|
||||
ErrGRPCWatchCanceled = status.New(codes.Canceled, "etcdserver: watch canceled").Err()
|
||||
|
||||
ErrGRPCMemberExist = status.New(codes.FailedPrecondition, "etcdserver: member ID already exist").Err()
|
||||
ErrGRPCPeerURLExist = status.New(codes.FailedPrecondition, "etcdserver: Peer URLs already exists").Err()
|
||||
ErrGRPCMemberNotEnoughStarted = status.New(codes.FailedPrecondition, "etcdserver: re-configuration failed due to not enough started members").Err()
|
||||
|
@@ -31,6 +31,8 @@ import (
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
const minWatchProgressInterval = 100 * time.Millisecond
|
||||
|
||||
type watchServer struct {
|
||||
lg *zap.Logger
|
||||
|
||||
@@ -46,7 +48,7 @@ type watchServer struct {
|
||||
|
||||
// NewWatchServer returns a new watch server.
|
||||
func NewWatchServer(s *etcdserver.EtcdServer) pb.WatchServer {
|
||||
return &watchServer{
|
||||
srv := &watchServer{
|
||||
lg: s.Cfg.Logger,
|
||||
|
||||
clusterID: int64(s.Cluster().ID()),
|
||||
@@ -58,6 +60,21 @@ func NewWatchServer(s *etcdserver.EtcdServer) pb.WatchServer {
|
||||
watchable: s.Watchable(),
|
||||
ag: s,
|
||||
}
|
||||
if s.Cfg.WatchProgressNotifyInterval > 0 {
|
||||
if s.Cfg.WatchProgressNotifyInterval < minWatchProgressInterval {
|
||||
if srv.lg != nil {
|
||||
srv.lg.Warn(
|
||||
"adjusting watch progress notify interval to minimum period",
|
||||
zap.Duration("min-watch-progress-notify-interval", minWatchProgressInterval),
|
||||
)
|
||||
} else {
|
||||
plog.Warningf("adjusting watch progress notify interval to minimum period %v", minWatchProgressInterval)
|
||||
}
|
||||
s.Cfg.WatchProgressNotifyInterval = minWatchProgressInterval
|
||||
}
|
||||
SetProgressReportInterval(s.Cfg.WatchProgressNotifyInterval)
|
||||
}
|
||||
return srv
|
||||
}
|
||||
|
||||
var (
|
||||
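NewWatchServer above clamps a configured progress-notify interval below 100ms up to that floor and logs a warning. A tiny sketch of the clamp in isolation:

// Sketch of the clamp: an interval below the floor is raised to the minimum
// instead of being used as-is.
package main

import (
    "fmt"
    "time"
)

const minProgressInterval = 100 * time.Millisecond

func clampInterval(configured time.Duration) time.Duration {
    if configured > 0 && configured < minProgressInterval {
        fmt.Printf("adjusting %v up to minimum %v\n", configured, minProgressInterval)
        return minProgressInterval
    }
    return configured
}

func main() {
    fmt.Println(clampInterval(10 * time.Millisecond)) // 100ms
    fmt.Println(clampInterval(10 * time.Minute))      // unchanged
}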
@@ -189,15 +206,25 @@ func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) {
|
||||
}
|
||||
}()
|
||||
|
||||
// TODO: There's a race here. When a stream is closed (e.g. due to a cancellation),
|
||||
// the underlying error (e.g. a gRPC stream error) may be returned and handled
|
||||
// through errc if the recv goroutine finishes before the send goroutine.
|
||||
// When the recv goroutine wins, the stream error is retained. When recv loses
|
||||
// the race, the underlying error is lost (unless the root error is propagated
|
||||
// through Context.Err() which is not always the case (as callers have to decide
|
||||
// to implement a custom context to do so). The stdlib context package builtins
|
||||
// may be insufficient to carry semantically useful errors around and should be
|
||||
// revisited.
|
||||
select {
|
||||
case err = <-errc:
|
||||
if err == context.Canceled {
|
||||
err = rpctypes.ErrGRPCWatchCanceled
|
||||
}
|
||||
close(sws.ctrlStream)
|
||||
|
||||
case <-stream.Context().Done():
|
||||
err = stream.Context().Err()
|
||||
// the only server-side cancellation is noleader for now.
|
||||
if err == context.Canceled {
|
||||
err = rpctypes.ErrGRPCNoLeader
|
||||
err = rpctypes.ErrGRPCWatchCanceled
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -33,10 +33,6 @@ import (
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
const (
|
||||
warnApplyDuration = 100 * time.Millisecond
|
||||
)
|
||||
|
||||
type applyResult struct {
|
||||
resp proto.Message
|
||||
err error
|
||||
@@ -115,7 +111,7 @@ func (s *EtcdServer) newApplierV3() applierV3 {
|
||||
func (a *applierV3backend) Apply(r *pb.InternalRaftRequest) *applyResult {
|
||||
ar := &applyResult{}
|
||||
defer func(start time.Time) {
|
||||
warnOfExpensiveRequest(a.s.getLogger(), start, &pb.InternalRaftStringer{Request: r}, ar.resp, ar.err)
|
||||
warnOfExpensiveRequest(a.s.getLogger(), a.s.Cfg.WarningApplyDuration, start, &pb.InternalRaftStringer{Request: r}, ar.resp, ar.err)
|
||||
if ar.err != nil {
|
||||
warnOfFailedRequest(a.s.getLogger(), start, &pb.InternalRaftStringer{Request: r}, ar.resp, ar.err)
|
||||
}
|
||||
@@ -185,7 +181,7 @@ func (a *applierV3backend) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (resp *pb.Pu
|
||||
trace = traceutil.New("put",
|
||||
a.s.getLogger(),
|
||||
traceutil.Field{Key: "key", Value: string(p.Key)},
|
||||
traceutil.Field{Key: "req_size", Value: proto.Size(p)},
|
||||
traceutil.Field{Key: "req_size", Value: p.Size()},
|
||||
)
|
||||
val, leaseID := p.Value, lease.LeaseID(p.Lease)
|
||||
if txn == nil {
|
||||
|
@@ -16,6 +16,7 @@ package etcdserver
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"path"
|
||||
"time"
|
||||
|
||||
@@ -114,7 +115,11 @@ func (a *applierV2store) Sync(r *RequestV2) Response {
|
||||
// applyV2Request interprets r as a call to v2store.X
|
||||
// and returns a Response interpreted from v2store.Event
|
||||
func (s *EtcdServer) applyV2Request(r *RequestV2) Response {
|
||||
defer warnOfExpensiveRequest(s.getLogger(), time.Now(), r, nil, nil)
|
||||
stringer := panicAlternativeStringer{
|
||||
stringer: r,
|
||||
alternative: func() string { return fmt.Sprintf("id:%d,method:%s,path:%s", r.ID, r.Method, r.Path) },
|
||||
}
|
||||
defer warnOfExpensiveRequest(s.getLogger(), s.Cfg.WarningApplyDuration, time.Now(), stringer, nil, nil)
|
||||
|
||||
switch r.Method {
|
||||
case "POST":
|
||||
|
@@ -31,6 +31,7 @@ import (
|
||||
func newBackend(cfg ServerConfig) backend.Backend {
|
||||
bcfg := backend.DefaultBackendConfig()
|
||||
bcfg.Path = cfg.backendPath()
|
||||
bcfg.UnsafeNoFsync = cfg.UnsafeNoFsync
|
||||
if cfg.BackendBatchLimit != 0 {
|
||||
bcfg.BatchLimit = cfg.BackendBatchLimit
|
||||
if cfg.Logger != nil {
|
||||
|
@@ -119,6 +119,8 @@ type ServerConfig struct {
|
||||
// MaxRequestBytes is the maximum request size to send over raft.
|
||||
MaxRequestBytes uint
|
||||
|
||||
WarningApplyDuration time.Duration
|
||||
|
||||
StrictReconfigCheck bool
|
||||
|
||||
// ClientCertAuthEnabled is true when cert has been signed by the client CA.
|
||||
@@ -126,6 +128,7 @@ type ServerConfig struct {
|
||||
|
||||
AuthToken string
|
||||
BcryptCost uint
|
||||
TokenTTL uint
|
||||
|
||||
// InitialCorruptCheck is true to check data corruption on boot
|
||||
// before serving any peer/client traffic.
|
||||
@@ -157,6 +160,12 @@ type ServerConfig struct {
|
||||
LeaseCheckpointInterval time.Duration
|
||||
|
||||
EnableGRPCGateway bool
|
||||
|
||||
WatchProgressNotifyInterval time.Duration
|
||||
|
||||
// UnsafeNoFsync disables all uses of fsync.
|
||||
// Setting this is unsafe and will cause data loss.
|
||||
UnsafeNoFsync bool `json:"unsafe-no-fsync"`
|
||||
}
|
||||
|
||||
// VerifyBootstrap sanity-checks the initial config for bootstrap case
|
||||
|
@@ -137,7 +137,7 @@ type loggableValueCompare struct {
|
||||
Result Compare_CompareResult `protobuf:"varint,1,opt,name=result,proto3,enum=etcdserverpb.Compare_CompareResult"`
|
||||
Target Compare_CompareTarget `protobuf:"varint,2,opt,name=target,proto3,enum=etcdserverpb.Compare_CompareTarget"`
|
||||
Key []byte `protobuf:"bytes,3,opt,name=key,proto3"`
|
||||
ValueSize int `protobuf:"bytes,7,opt,name=value_size,proto3"`
|
||||
ValueSize int64 `protobuf:"varint,7,opt,name=value_size,proto3"`
|
||||
RangeEnd []byte `protobuf:"bytes,64,opt,name=range_end,proto3"`
|
||||
}
|
||||
|
||||
@@ -146,7 +146,7 @@ func newLoggableValueCompare(c *Compare, cv *Compare_Value) *loggableValueCompar
|
||||
c.Result,
|
||||
c.Target,
|
||||
c.Key,
|
||||
len(cv.Value),
|
||||
int64(len(cv.Value)),
|
||||
c.RangeEnd,
|
||||
}
|
||||
}
|
||||
@@ -160,7 +160,7 @@ func (*loggableValueCompare) ProtoMessage() {}
|
||||
// To preserve proto encoding of the key bytes, a faked out proto type is used here.
|
||||
type loggablePutRequest struct {
|
||||
Key []byte `protobuf:"bytes,1,opt,name=key,proto3"`
|
||||
ValueSize int `protobuf:"varint,2,opt,name=value_size,proto3"`
|
||||
ValueSize int64 `protobuf:"varint,2,opt,name=value_size,proto3"`
|
||||
Lease int64 `protobuf:"varint,3,opt,name=lease,proto3"`
|
||||
PrevKv bool `protobuf:"varint,4,opt,name=prev_kv,proto3"`
|
||||
IgnoreValue bool `protobuf:"varint,5,opt,name=ignore_value,proto3"`
|
||||
@@ -170,7 +170,7 @@ type loggablePutRequest struct {
|
||||
func NewLoggablePutRequest(request *PutRequest) *loggablePutRequest {
|
||||
return &loggablePutRequest{
|
||||
request.Key,
|
||||
len(request.Value),
|
||||
int64(len(request.Value)),
|
||||
request.Lease,
|
||||
request.PrevKv,
|
||||
request.IgnoreValue,
|
||||
|
@@ -151,6 +151,19 @@ var (
|
||||
Help: "Server or member ID in hexadecimal format. 1 for 'server_id' label with current ID.",
|
||||
},
|
||||
[]string{"server_id"})
|
||||
|
||||
fdUsed = prometheus.NewGauge(prometheus.GaugeOpts{
|
||||
Namespace: "os",
|
||||
Subsystem: "fd",
|
||||
Name: "used",
|
||||
Help: "The number of used file descriptors.",
|
||||
})
|
||||
fdLimit = prometheus.NewGauge(prometheus.GaugeOpts{
|
||||
Namespace: "os",
|
||||
Subsystem: "fd",
|
||||
Name: "limit",
|
||||
Help: "The file descriptor limit.",
|
||||
})
|
||||
)
|
||||
|
||||
func init() {
|
||||
@@ -174,6 +187,8 @@ func init() {
|
||||
prometheus.MustRegister(isLearner)
|
||||
prometheus.MustRegister(learnerPromoteSucceed)
|
||||
prometheus.MustRegister(learnerPromoteFailed)
|
||||
prometheus.MustRegister(fdUsed)
|
||||
prometheus.MustRegister(fdLimit)
|
||||
|
||||
currentVersion.With(prometheus.Labels{
|
||||
"server_version": version.Version,
|
||||
@@ -184,7 +199,12 @@ func init() {
|
||||
}
|
||||
|
||||
func monitorFileDescriptor(lg *zap.Logger, done <-chan struct{}) {
|
||||
ticker := time.NewTicker(5 * time.Second)
// This ticker checks file descriptor usage and logs a warning when usage
// reaches 80% of the limit (used >= limit/5*4); it only records a message.
// runtime.FDUsage() becomes expensive once more than ~10K descriptors are open,
// so the check interval was increased from 5 seconds to 10 minutes.
// See https://github.com/etcd-io/etcd/issues/11969 for more detail.
ticker := time.NewTicker(10 * time.Minute)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
used, err := runtime.FDUsage()
|
||||
@@ -196,6 +216,7 @@ func monitorFileDescriptor(lg *zap.Logger, done <-chan struct{}) {
|
||||
}
|
||||
return
|
||||
}
|
||||
fdUsed.Set(float64(used))
|
||||
limit, err := runtime.FDLimit()
|
||||
if err != nil {
|
||||
if lg != nil {
|
||||
@@ -205,6 +226,7 @@ func monitorFileDescriptor(lg *zap.Logger, done <-chan struct{}) {
|
||||
}
|
||||
return
|
||||
}
|
||||
fdLimit.Set(float64(limit))
|
||||
if used >= limit/5*4 {
|
||||
if lg != nil {
|
||||
lg.Warn("80% of file descriptors are used", zap.Uint64("used", used), zap.Uint64("limit", limit))
|
||||
|
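The metrics hunk above exports used/limit file-descriptor gauges and warns once usage crosses 80% of the limit. A hedged sketch of the same gauge-plus-threshold pattern with the Prometheus client; the metric names are illustrative and the usage numbers are hard-coded instead of being read from the OS:

// Sketch: register two gauges, set them, and warn at 80% of the limit.
package main

import (
    "fmt"

    "github.com/prometheus/client_golang/prometheus"
)

var (
    fdUsed = prometheus.NewGauge(prometheus.GaugeOpts{
        Namespace: "example", Subsystem: "fd", Name: "used",
        Help: "The number of used file descriptors.",
    })
    fdLimit = prometheus.NewGauge(prometheus.GaugeOpts{
        Namespace: "example", Subsystem: "fd", Name: "limit",
        Help: "The file descriptor limit.",
    })
)

func main() {
    prometheus.MustRegister(fdUsed, fdLimit)

    used, limit := uint64(850), uint64(1024)
    fdUsed.Set(float64(used))
    fdLimit.Set(float64(limit))

    if used >= limit/5*4 { // 80% of the limit
        fmt.Printf("80%% of file descriptors are used: used=%d limit=%d\n", used, limit)
    }
}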
@@ -465,6 +465,9 @@ func startNode(cfg ServerConfig, cl *membership.RaftCluster, ids []types.ID) (id
|
||||
plog.Panicf("create wal error: %v", err)
|
||||
}
|
||||
}
|
||||
if cfg.UnsafeNoFsync {
|
||||
w.SetUnsafeNoFsync()
|
||||
}
|
||||
peers := make([]raft.Peer, len(ids))
|
||||
for i, id := range ids {
|
||||
var ctx []byte
|
||||
@@ -527,7 +530,7 @@ func restartNode(cfg ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *member
|
||||
if snapshot != nil {
|
||||
walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
|
||||
}
|
||||
w, id, cid, st, ents := readWAL(cfg.Logger, cfg.WALDir(), walsnap)
|
||||
w, id, cid, st, ents := readWAL(cfg.Logger, cfg.WALDir(), walsnap, cfg.UnsafeNoFsync)
|
||||
|
||||
if cfg.Logger != nil {
|
||||
cfg.Logger.Info(
|
||||
@@ -582,7 +585,7 @@ func restartAsStandaloneNode(cfg ServerConfig, snapshot *raftpb.Snapshot) (types
|
||||
if snapshot != nil {
|
||||
walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
|
||||
}
|
||||
w, id, cid, st, ents := readWAL(cfg.Logger, cfg.WALDir(), walsnap)
|
||||
w, id, cid, st, ents := readWAL(cfg.Logger, cfg.WALDir(), walsnap, cfg.UnsafeNoFsync)
|
||||
|
||||
// discard the previously uncommitted entries
|
||||
for i, ent := range ents {
|
||||
@@ -672,10 +675,11 @@ func restartAsStandaloneNode(cfg ServerConfig, snapshot *raftpb.Snapshot) (types
|
||||
}
|
||||
|
||||
// getIDs returns an ordered set of IDs included in the given snapshot and
|
||||
// the entries. The given snapshot/entries can contain two kinds of
|
||||
// the entries. The given snapshot/entries can contain three kinds of
|
||||
// ID-related entry:
|
||||
// - ConfChangeAddNode, in which case the contained ID will be added into the set.
|
||||
// - ConfChangeRemoveNode, in which case the contained ID will be removed from the set.
|
||||
// - ConfChangeAddLearnerNode, in which the contained ID will be added into the set.
|
||||
func getIDs(lg *zap.Logger, snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 {
|
||||
ids := make(map[uint64]bool)
|
||||
if snap != nil {
|
||||
@@ -690,6 +694,8 @@ func getIDs(lg *zap.Logger, snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64
|
||||
var cc raftpb.ConfChange
|
||||
pbutil.MustUnmarshal(&cc, e.Data)
|
||||
switch cc.Type {
|
||||
case raftpb.ConfChangeAddLearnerNode:
|
||||
ids[cc.NodeID] = true
|
||||
case raftpb.ConfChangeAddNode:
|
||||
ids[cc.NodeID] = true
|
||||
case raftpb.ConfChangeRemoveNode:
|
||||
|
@@ -553,6 +553,7 @@ func NewServer(cfg ServerConfig) (srv *EtcdServer, err error) {
|
||||
func(index uint64) <-chan struct{} {
|
||||
return srv.applyWait.Wait(index)
|
||||
},
|
||||
time.Duration(cfg.TokenTTL)*time.Second,
|
||||
)
|
||||
if err != nil {
|
||||
if cfg.Logger != nil {
|
||||
|
@@ -82,7 +82,7 @@ func (st *storage) Release(snap raftpb.Snapshot) error {
|
||||
// readWAL reads the WAL at the given snap and returns the wal, its latest HardState and cluster ID, and all entries that appear
|
||||
// after the position of the given snap in the WAL.
|
||||
// The snap must have been previously saved to the WAL, or this call will panic.
|
||||
func readWAL(lg *zap.Logger, waldir string, snap walpb.Snapshot) (w *wal.WAL, id, cid types.ID, st raftpb.HardState, ents []raftpb.Entry) {
|
||||
func readWAL(lg *zap.Logger, waldir string, snap walpb.Snapshot, unsafeNoFsync bool) (w *wal.WAL, id, cid types.ID, st raftpb.HardState, ents []raftpb.Entry) {
|
||||
var (
|
||||
err error
|
||||
wmetadata []byte
|
||||
@@ -97,6 +97,9 @@ func readWAL(lg *zap.Logger, waldir string, snap walpb.Snapshot) (w *wal.WAL, id
|
||||
plog.Fatalf("open wal error: %v", err)
|
||||
}
|
||||
}
|
||||
if unsafeNoFsync {
|
||||
w.SetUnsafeNoFsync()
|
||||
}
|
||||
if wmetadata, st, ents, err = w.ReadAll(); err != nil {
|
||||
w.Close()
|
||||
// we can only repair ErrUnexpectedEOF and we never repair twice.
|
||||
|
@@ -103,12 +103,12 @@ func (nc *notifier) notify(err error) {
|
||||
close(nc.c)
|
||||
}
|
||||
|
||||
func warnOfExpensiveRequest(lg *zap.Logger, now time.Time, reqStringer fmt.Stringer, respMsg proto.Message, err error) {
|
||||
func warnOfExpensiveRequest(lg *zap.Logger, warningApplyDuration time.Duration, now time.Time, reqStringer fmt.Stringer, respMsg proto.Message, err error) {
|
||||
var resp string
|
||||
if !isNil(respMsg) {
|
||||
resp = fmt.Sprintf("size:%d", proto.Size(respMsg))
|
||||
}
|
||||
warnOfExpensiveGenericRequest(lg, now, reqStringer, "", resp, err)
|
||||
warnOfExpensiveGenericRequest(lg, warningApplyDuration, now, reqStringer, "", resp, err)
|
||||
}
|
||||
|
||||
func warnOfFailedRequest(lg *zap.Logger, now time.Time, reqStringer fmt.Stringer, respMsg proto.Message, err error) {
|
||||
@@ -130,7 +130,7 @@ func warnOfFailedRequest(lg *zap.Logger, now time.Time, reqStringer fmt.Stringer
|
||||
}
|
||||
}
|
||||
|
||||
func warnOfExpensiveReadOnlyTxnRequest(lg *zap.Logger, now time.Time, r *pb.TxnRequest, txnResponse *pb.TxnResponse, err error) {
|
||||
func warnOfExpensiveReadOnlyTxnRequest(lg *zap.Logger, warningApplyDuration time.Duration, now time.Time, r *pb.TxnRequest, txnResponse *pb.TxnResponse, err error) {
|
||||
reqStringer := pb.NewLoggableTxnRequest(r)
|
||||
var resp string
|
||||
if !isNil(txnResponse) {
|
||||
@@ -143,27 +143,27 @@ func warnOfExpensiveReadOnlyTxnRequest(lg *zap.Logger, now time.Time, r *pb.TxnR
|
||||
// only range responses should be in a read only txn request
|
||||
}
|
||||
}
|
||||
resp = fmt.Sprintf("responses:<%s> size:%d", strings.Join(resps, " "), proto.Size(txnResponse))
|
||||
resp = fmt.Sprintf("responses:<%s> size:%d", strings.Join(resps, " "), txnResponse.Size())
|
||||
}
|
||||
warnOfExpensiveGenericRequest(lg, now, reqStringer, "read-only range ", resp, err)
|
||||
warnOfExpensiveGenericRequest(lg, warningApplyDuration, now, reqStringer, "read-only range ", resp, err)
|
||||
}
|
||||
|
||||
func warnOfExpensiveReadOnlyRangeRequest(lg *zap.Logger, now time.Time, reqStringer fmt.Stringer, rangeResponse *pb.RangeResponse, err error) {
|
||||
func warnOfExpensiveReadOnlyRangeRequest(lg *zap.Logger, warningApplyDuration time.Duration, now time.Time, reqStringer fmt.Stringer, rangeResponse *pb.RangeResponse, err error) {
|
||||
var resp string
|
||||
if !isNil(rangeResponse) {
|
||||
resp = fmt.Sprintf("range_response_count:%d size:%d", len(rangeResponse.Kvs), proto.Size(rangeResponse))
|
||||
resp = fmt.Sprintf("range_response_count:%d size:%d", len(rangeResponse.Kvs), rangeResponse.Size())
|
||||
}
|
||||
warnOfExpensiveGenericRequest(lg, now, reqStringer, "read-only range ", resp, err)
|
||||
warnOfExpensiveGenericRequest(lg, warningApplyDuration, now, reqStringer, "read-only range ", resp, err)
|
||||
}
|
||||
|
||||
func warnOfExpensiveGenericRequest(lg *zap.Logger, now time.Time, reqStringer fmt.Stringer, prefix string, resp string, err error) {
|
||||
func warnOfExpensiveGenericRequest(lg *zap.Logger, warningApplyDuration time.Duration, now time.Time, reqStringer fmt.Stringer, prefix string, resp string, err error) {
|
||||
d := time.Since(now)
|
||||
if d > warnApplyDuration {
|
||||
if d > warningApplyDuration {
|
||||
if lg != nil {
|
||||
lg.Warn(
|
||||
"apply request took too long",
|
||||
zap.Duration("took", d),
|
||||
zap.Duration("expected-duration", warnApplyDuration),
|
||||
zap.Duration("expected-duration", warningApplyDuration),
|
||||
zap.String("prefix", prefix),
|
||||
zap.String("request", reqStringer.String()),
|
||||
zap.String("response", resp),
|
||||
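With the change above, the slow-request warning compares elapsed time against the configurable --experimental-warning-apply-duration value instead of a fixed 100ms constant. A minimal sketch of timing against a caller-supplied threshold:

// Sketch of the configurable slow-request warning: the threshold is an argument
// rather than a package-level constant.
package main

import (
    "fmt"
    "time"
)

func warnIfExpensive(threshold time.Duration, start time.Time, request string) {
    if d := time.Since(start); d > threshold {
        fmt.Printf("apply request took too long: took=%v expected=%v request=%q\n", d, threshold, request)
    }
}

func main() {
    start := time.Now()
    time.Sleep(20 * time.Millisecond) // simulate work
    warnIfExpensive(10*time.Millisecond, start, "put key=/foo")
}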
@@ -185,3 +185,21 @@ func warnOfExpensiveGenericRequest(lg *zap.Logger, now time.Time, reqStringer fm
|
||||
func isNil(msg proto.Message) bool {
|
||||
return msg == nil || reflect.ValueOf(msg).IsNil()
|
||||
}
|
||||
|
||||
// panicAlternativeStringer wraps a fmt.Stringer, and if calling String() panics, calls the alternative instead.
|
||||
// This is needed to ensure logging slow v2 requests does not panic, which occurs when running integration tests
|
||||
// with the embedded server with github.com/golang/protobuf v1.4.0+. See https://github.com/etcd-io/etcd/issues/12197.
|
||||
type panicAlternativeStringer struct {
|
||||
stringer fmt.Stringer
|
||||
alternative func() string
|
||||
}
|
||||
|
||||
func (n panicAlternativeStringer) String() (s string) {
|
||||
defer func() {
|
||||
if err := recover(); err != nil {
|
||||
s = n.alternative()
|
||||
}
|
||||
}()
|
||||
s = n.stringer.String()
|
||||
return s
|
||||
}
|
||||
|
@@ -90,3 +90,23 @@ func (s *nopTransporterWithActiveTime) Stop() {}
|
||||
func (s *nopTransporterWithActiveTime) Pause() {}
|
||||
func (s *nopTransporterWithActiveTime) Resume() {}
|
||||
func (s *nopTransporterWithActiveTime) reset(am map[types.ID]time.Time) { s.activeMap = am }
|
||||
|
||||
func TestPanicAlternativeStringer(t *testing.T) {
|
||||
p := panicAlternativeStringer{alternative: func() string { return "alternative" }}
|
||||
|
||||
p.stringer = testStringerFunc(func() string { panic("here") })
|
||||
if s := p.String(); s != "alternative" {
|
||||
t.Fatalf("expected 'alternative', got %q", s)
|
||||
}
|
||||
|
||||
p.stringer = testStringerFunc(func() string { return "test" })
|
||||
if s := p.String(); s != "test" {
|
||||
t.Fatalf("expected 'test', got %q", s)
|
||||
}
|
||||
}
|
||||
|
||||
type testStringerFunc func() string
|
||||
|
||||
func (s testStringerFunc) String() string {
|
||||
return s()
|
||||
}
|
||||
|
@@ -97,7 +97,7 @@ func (s *EtcdServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeRe
|
||||
var resp *pb.RangeResponse
|
||||
var err error
|
||||
defer func(start time.Time) {
|
||||
warnOfExpensiveReadOnlyRangeRequest(s.getLogger(), start, r, resp, err)
|
||||
warnOfExpensiveReadOnlyRangeRequest(s.getLogger(), s.Cfg.WarningApplyDuration, start, r, resp, err)
|
||||
if resp != nil {
|
||||
trace.AddField(
|
||||
traceutil.Field{Key: "response_count", Value: len(resp.Kvs)},
|
||||
@@ -158,7 +158,7 @@ func (s *EtcdServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse
|
||||
}
|
||||
|
||||
defer func(start time.Time) {
|
||||
warnOfExpensiveReadOnlyTxnRequest(s.getLogger(), start, r, resp, err)
|
||||
warnOfExpensiveReadOnlyTxnRequest(s.getLogger(), s.Cfg.WarningApplyDuration, start, r, resp, err)
|
||||
}(time.Now())
|
||||
|
||||
get := func() { resp, err = s.applyV3Base.Txn(r) }
|
||||
@@ -428,9 +428,10 @@ func (s *EtcdServer) Authenticate(ctx context.Context, r *pb.AuthenticateRequest
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// internalReq doesn't need to carry Password because s.AuthStore().CheckPassword() above already verified it.
// This also keeps the plain-text password out of the WAL entry.
|
||||
internalReq := &pb.InternalAuthenticateRequest{
|
||||
Name: r.Name,
|
||||
Password: r.Password,
|
||||
SimpleToken: st,
|
||||
}
|
||||
|
||||
|
@@ -13,7 +13,7 @@ if ! [[ "${0}" =~ "scripts/docker-local-agent.sh" ]]; then
|
||||
fi
|
||||
|
||||
if [[ -z "${GO_VERSION}" ]]; then
|
||||
GO_VERSION=1.12.12
|
||||
GO_VERSION=1.12.17
|
||||
fi
|
||||
echo "Running with GO_VERSION:" ${GO_VERSION}
|
||||
|
||||
|
@@ -6,7 +6,7 @@ if ! [[ "${0}" =~ "scripts/docker-local-tester.sh" ]]; then
|
||||
fi
|
||||
|
||||
if [[ -z "${GO_VERSION}" ]]; then
|
||||
GO_VERSION=1.12.12
|
||||
GO_VERSION=1.12.17
|
||||
fi
|
||||
echo "Running with GO_VERSION:" ${GO_VERSION}
|
||||
|
||||
|
19
go.mod
@@ -1,6 +1,6 @@
|
||||
module go.etcd.io/etcd
|
||||
|
||||
go 1.14
|
||||
go 1.12
|
||||
|
||||
require (
|
||||
github.com/bgentry/speakeasy v0.1.0
|
||||
@@ -8,16 +8,16 @@ require (
|
||||
github.com/coreos/go-semver v0.2.0
|
||||
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7
|
||||
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf
|
||||
github.com/creack/pty v1.1.7
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible
|
||||
github.com/creack/pty v1.1.11
|
||||
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4
|
||||
github.com/fatih/color v1.7.0 // indirect
|
||||
github.com/gogo/protobuf v1.2.1
|
||||
github.com/golang-jwt/jwt v3.2.1+incompatible
|
||||
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903
|
||||
github.com/golang/protobuf v1.3.2
|
||||
github.com/google/btree v1.0.0
|
||||
github.com/google/uuid v1.0.0
|
||||
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c // indirect
|
||||
github.com/gorilla/websocket v1.4.2 // indirect
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.9.5
|
||||
@@ -35,22 +35,19 @@ require (
|
||||
github.com/soheilhy/cmux v0.1.4
|
||||
github.com/spf13/cobra v0.0.3
|
||||
github.com/spf13/pflag v1.0.1
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20200427203606-3cfed13b9966
|
||||
github.com/urfave/cli v1.20.0
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2
|
||||
go.etcd.io/bbolt v1.3.3
|
||||
go.uber.org/atomic v1.3.2 // indirect
|
||||
go.uber.org/multierr v1.1.0 // indirect
|
||||
go.uber.org/zap v1.10.0
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 // indirect
|
||||
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7
|
||||
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 // indirect
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974
|
||||
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135 // indirect
|
||||
google.golang.org/grpc v1.26.0
|
||||
gopkg.in/cheggaaa/pb.v1 v1.0.25
|
||||
gopkg.in/yaml.v2 v2.2.2
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc // indirect
|
||||
sigs.k8s.io/yaml v1.1.0
|
||||
)
|
||||
|
54
go.sum
@@ -2,7 +2,6 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
@@ -18,13 +17,11 @@ github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7 h1:u9SHYsPQNyt5t
|
||||
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf h1:CAKfRE2YtTUIjjh1bkBtyYFaUT/WmOqsJjgtihT0vMI=
|
||||
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
||||
github.com/creack/pty v1.1.7 h1:6pwm8kMQKCmgUg0ZHTm5+/YvRK0s3THD/28+T6/kk4A=
|
||||
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
|
||||
github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw=
|
||||
github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4 h1:qk/FSDDxo05wdJH28W+p5yivv7LuLYLRXPPD8KQCtZs=
|
||||
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
@@ -35,29 +32,29 @@ github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeME
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/gogo/protobuf v1.1.1 h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=
|
||||
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
|
||||
github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c=
|
||||
github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903 h1:LbsanbbD6LieFkXbj9YNNBupiGHJgFeLpO0j0Fza1h8=
|
||||
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
|
||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/uuid v1.0.0 h1:b4Gk+7WdP/d3HZH8EJsZpvV7EtDOgaZLtnaNGIu1adA=
|
||||
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c h1:Lh2aW+HnU2Nbe1gqD9SOJLJxW1jBMmQOktN2acDyJk8=
|
||||
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
|
||||
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4 h1:z53tR0945TRRQO/fLEVPI6SMv7ZflF0TEaTAoU7tOzg=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
|
||||
@@ -68,7 +65,6 @@ github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NH
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo=
|
||||
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
|
||||
github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo=
|
||||
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
@@ -108,7 +104,6 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP
|
||||
github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM=
|
||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
@@ -118,7 +113,6 @@ github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R
|
||||
github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs=
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
||||
github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
@@ -130,12 +124,11 @@ github.com/spf13/pflag v1.0.1 h1:aCvUg6QPl3ibpQUxyLkrEkCHtPqYJL4x9AuhqVqFis4=
|
||||
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8 h1:ndzgwNDnKIqyCvHTXaCqh9KlOWKvBry6nuXMJmonVsE=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20200427203606-3cfed13b9966 h1:j6JEOq5QWFker+d7mFQYOhjTZonQ7YkLTHm56dbn+yM=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20200427203606-3cfed13b9966/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/urfave/cli v1.20.0 h1:fDqGv3UG/4jbVl/QkFwEdddtEDjh/5Ov6X+0B/3bPaw=
|
||||
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
|
||||
@@ -148,26 +141,24 @@ go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
|
||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM=
|
||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793 h1:u+LnwYTOOW7Ukr/fppxEb1Nwz0AtPflrblfvUudpo+I=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc h1:c0o/qxkaO2LF5t6fQrT4b5hzyggAkLLlCUjqfRxd8Q4=
|
||||
golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a h1:gOpx8G595UYyvj8UK4+OFyY4rx037g3fmfhe5SasG3U=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7 h1:fHDIZ2oxGnUZRN6WgWFCbYBjH9uqVPRCUVUDhs0wnbA=
|
||||
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
@@ -176,36 +167,35 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5 h1:mzjBh+S5frKOsOBobWIMAbXavqjmgO17k/2puhcFR94=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 h1:ng0gs1AKnRRuEMZoTLLlbOd+C17zUDepwGQBb/n+JVg=
|
||||
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4 h1:myAQVi0cGEoqQVR5POX+8RR2mrocKqNN1hmeMqhX27k=
|
||||
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 h1:+DCIGbF/swA92ohVg0//6X2IVY3KZs6p9mix0ziNYJM=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135 h1:5Beo0mZN8dRzgrMMkDp0jc8YXQKx9DiJ2k1dkvGsn5A=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.24.0 h1:vb/1TCsVn3DcJlQ0Gs1yB1pKI6Do2/QNwxdKqmc/b0s=
|
||||
google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
|
||||
google.golang.org/grpc v1.26.0 h1:2dTRdpdFEEhJYQD8EMLB61nnrzSCTbG38PhqdhvOltg=
|
||||
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
|
@@ -63,7 +63,7 @@ import (
|
||||
|
||||
const (
|
||||
// RequestWaitTimeout is the time duration to wait for a request to go through or detect leader loss.
|
||||
RequestWaitTimeout = 3 * time.Second
|
||||
RequestWaitTimeout = 4 * time.Second
|
||||
tickDuration = 10 * time.Millisecond
|
||||
requestTimeout = 20 * time.Second
|
||||
|
||||
@@ -152,6 +152,8 @@ type ClusterConfig struct {
|
||||
|
||||
EnableLeaseCheckpoint bool
|
||||
LeaseCheckpointInterval time.Duration
|
||||
|
||||
WatchProgressNotifyInterval time.Duration
|
||||
}
|
||||
|
||||
type cluster struct {
|
||||
@@ -279,23 +281,24 @@ func (c *cluster) HTTPMembers() []client.Member {
|
||||
func (c *cluster) mustNewMember(t testing.TB) *member {
|
||||
m := mustNewMember(t,
|
||||
memberConfig{
|
||||
name: c.name(rand.Int()),
|
||||
authToken: c.cfg.AuthToken,
|
||||
peerTLS: c.cfg.PeerTLS,
|
||||
clientTLS: c.cfg.ClientTLS,
|
||||
quotaBackendBytes: c.cfg.QuotaBackendBytes,
|
||||
maxTxnOps: c.cfg.MaxTxnOps,
|
||||
maxRequestBytes: c.cfg.MaxRequestBytes,
|
||||
snapshotCount: c.cfg.SnapshotCount,
|
||||
snapshotCatchUpEntries: c.cfg.SnapshotCatchUpEntries,
|
||||
grpcKeepAliveMinTime: c.cfg.GRPCKeepAliveMinTime,
|
||||
grpcKeepAliveInterval: c.cfg.GRPCKeepAliveInterval,
|
||||
grpcKeepAliveTimeout: c.cfg.GRPCKeepAliveTimeout,
|
||||
clientMaxCallSendMsgSize: c.cfg.ClientMaxCallSendMsgSize,
|
||||
clientMaxCallRecvMsgSize: c.cfg.ClientMaxCallRecvMsgSize,
|
||||
useIP: c.cfg.UseIP,
|
||||
enableLeaseCheckpoint: c.cfg.EnableLeaseCheckpoint,
|
||||
leaseCheckpointInterval: c.cfg.LeaseCheckpointInterval,
|
||||
name: c.name(rand.Int()),
|
||||
authToken: c.cfg.AuthToken,
|
||||
peerTLS: c.cfg.PeerTLS,
|
||||
clientTLS: c.cfg.ClientTLS,
|
||||
quotaBackendBytes: c.cfg.QuotaBackendBytes,
|
||||
maxTxnOps: c.cfg.MaxTxnOps,
|
||||
maxRequestBytes: c.cfg.MaxRequestBytes,
|
||||
snapshotCount: c.cfg.SnapshotCount,
|
||||
snapshotCatchUpEntries: c.cfg.SnapshotCatchUpEntries,
|
||||
grpcKeepAliveMinTime: c.cfg.GRPCKeepAliveMinTime,
|
||||
grpcKeepAliveInterval: c.cfg.GRPCKeepAliveInterval,
|
||||
grpcKeepAliveTimeout: c.cfg.GRPCKeepAliveTimeout,
|
||||
clientMaxCallSendMsgSize: c.cfg.ClientMaxCallSendMsgSize,
|
||||
clientMaxCallRecvMsgSize: c.cfg.ClientMaxCallRecvMsgSize,
|
||||
useIP: c.cfg.UseIP,
|
||||
enableLeaseCheckpoint: c.cfg.EnableLeaseCheckpoint,
|
||||
leaseCheckpointInterval: c.cfg.LeaseCheckpointInterval,
|
||||
WatchProgressNotifyInterval: c.cfg.WatchProgressNotifyInterval,
|
||||
})
|
||||
m.DiscoveryURL = c.cfg.DiscoveryURL
|
||||
if c.cfg.UseGRPC {
|
||||
@@ -568,23 +571,24 @@ type member struct {
|
||||
func (m *member) GRPCAddr() string { return m.grpcAddr }
|
||||
|
||||
type memberConfig struct {
|
||||
name string
|
||||
peerTLS *transport.TLSInfo
|
||||
clientTLS *transport.TLSInfo
|
||||
authToken string
|
||||
quotaBackendBytes int64
|
||||
maxTxnOps uint
|
||||
maxRequestBytes uint
|
||||
snapshotCount uint64
|
||||
snapshotCatchUpEntries uint64
|
||||
grpcKeepAliveMinTime time.Duration
|
||||
grpcKeepAliveInterval time.Duration
|
||||
grpcKeepAliveTimeout time.Duration
|
||||
clientMaxCallSendMsgSize int
|
||||
clientMaxCallRecvMsgSize int
|
||||
useIP bool
|
||||
enableLeaseCheckpoint bool
|
||||
leaseCheckpointInterval time.Duration
|
||||
name string
|
||||
peerTLS *transport.TLSInfo
|
||||
clientTLS *transport.TLSInfo
|
||||
authToken string
|
||||
quotaBackendBytes int64
|
||||
maxTxnOps uint
|
||||
maxRequestBytes uint
|
||||
snapshotCount uint64
|
||||
snapshotCatchUpEntries uint64
|
||||
grpcKeepAliveMinTime time.Duration
|
||||
grpcKeepAliveInterval time.Duration
|
||||
grpcKeepAliveTimeout time.Duration
|
||||
clientMaxCallSendMsgSize int
|
||||
clientMaxCallRecvMsgSize int
|
||||
useIP bool
|
||||
enableLeaseCheckpoint bool
|
||||
leaseCheckpointInterval time.Duration
|
||||
WatchProgressNotifyInterval time.Duration
|
||||
}
|
||||
|
||||
// mustNewMember return an inited member with the given name. If peerTLS is
|
||||
@@ -678,6 +682,8 @@ func mustNewMember(t testing.TB, mcfg memberConfig) *member {
|
||||
m.EnableLeaseCheckpoint = mcfg.enableLeaseCheckpoint
|
||||
m.LeaseCheckpointInterval = mcfg.leaseCheckpointInterval
|
||||
|
||||
m.WatchProgressNotifyInterval = mcfg.WatchProgressNotifyInterval
|
||||
|
||||
m.InitialCorruptCheck = true
|
||||
|
||||
lcfg := logutil.DefaultZapLoggerConfig
|
||||
|
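The WatchProgressNotifyInterval field plumbed through the test cluster config above corresponds to a server-side interval for sending watch progress notifications. Not part of this diff: a minimal client-side sketch of observing such notifications on an otherwise idle watch; the endpoint, key, and timeout are placeholder assumptions.

package main

import (
	"context"
	"fmt"
	"time"

	"go.etcd.io/etcd/clientv3"
)

func main() {
	// Assumed endpoint; the integration tests above configure the interval
	// through ClusterConfig.WatchProgressNotifyInterval instead.
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 2 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	for resp := range cli.Watch(ctx, "example-key") {
		if resp.IsProgressNotify() {
			// Periodic notification carrying only the current revision.
			fmt.Println("progress notify, revision:", resp.Header.Revision)
			continue
		}
		for _, ev := range resp.Events {
			fmt.Printf("%s %q=%q\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
		}
	}
}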
@@ -23,6 +23,8 @@ import (
|
||||
pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
|
||||
)
|
||||
|
||||
const throughProxy = false
|
||||
|
||||
func toGRPC(c *clientv3.Client) grpcAPI {
|
||||
return grpcAPI{
|
||||
pb.NewClusterClient(c.ActiveConnection()),
|
||||
|
@@ -25,6 +25,8 @@ import (
|
||||
"go.etcd.io/etcd/proxy/grpcproxy/adapter"
|
||||
)
|
||||
|
||||
const throughProxy = true
|
||||
|
||||
var (
|
||||
pmu sync.Mutex
|
||||
proxies map[*clientv3.Client]grpcClientProxy = make(map[*clientv3.Client]grpcClientProxy)
|
||||
|
@@ -56,6 +56,9 @@ func testTLSCipherSuites(t *testing.T, valid bool) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// go1.13 enables TLS13 by default, and in TLS13, cipher suites are not configurable
|
||||
// setting Max TLS version to TLS12 for go1.13
|
||||
cc.MaxVersion = tls.VersionTLS12
|
||||
cli, cerr := clientv3.New(clientv3.Config{
|
||||
Endpoints: []string{clus.Members[0].GRPCAddr()},
|
||||
DialTimeout: time.Second,
|
||||
|
@@ -791,9 +791,11 @@ func testV3WatchMultipleEventsTxn(t *testing.T, startRev int64) {
|
||||
|
||||
type eventsSortByKey []*mvccpb.Event
|
||||
|
||||
func (evs eventsSortByKey) Len() int { return len(evs) }
|
||||
func (evs eventsSortByKey) Swap(i, j int) { evs[i], evs[j] = evs[j], evs[i] }
|
||||
func (evs eventsSortByKey) Less(i, j int) bool { return bytes.Compare(evs[i].Kv.Key, evs[j].Kv.Key) < 0 }
|
||||
func (evs eventsSortByKey) Len() int { return len(evs) }
|
||||
func (evs eventsSortByKey) Swap(i, j int) { evs[i], evs[j] = evs[j], evs[i] }
|
||||
func (evs eventsSortByKey) Less(i, j int) bool {
|
||||
return bytes.Compare(evs[i].Kv.Key, evs[j].Kv.Key) < 0
|
||||
}
|
||||
|
||||
func TestV3WatchMultipleEventsPutUnsynced(t *testing.T) {
|
||||
defer testutil.AfterTest(t)
|
||||
@@ -1211,3 +1213,43 @@ func TestV3WatchWithPrevKV(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestV3WatchCancellation ensures that watch cancellation frees up server resources.
|
||||
func TestV3WatchCancellation(t *testing.T) {
|
||||
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
|
||||
defer clus.Terminate(t)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
cli := clus.RandClient()
|
||||
|
||||
// increment watcher total count and keep a stream open
|
||||
cli.Watch(ctx, "/foo")
|
||||
|
||||
for i := 0; i < 1000; i++ {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
cli.Watch(ctx, "/foo")
|
||||
cancel()
|
||||
}
|
||||
|
||||
// Wait a little for cancellations to take hold
|
||||
time.Sleep(3 * time.Second)
|
||||
|
||||
minWatches, err := clus.Members[0].Metric("etcd_debugging_mvcc_watcher_total")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
var expected string
|
||||
if throughProxy {
|
||||
// grpc proxy has additional 2 watches open
|
||||
expected = "3"
|
||||
} else {
|
||||
expected = "1"
|
||||
}
|
||||
|
||||
if minWatches != expected {
|
||||
t.Fatalf("expected %s watch, got %s", expected, minWatches)
|
||||
}
|
||||
}
|
||||
|
@@ -123,6 +123,8 @@ type BackendConfig struct {
|
||||
MmapSize uint64
|
||||
// Logger logs backend-side operations.
|
||||
Logger *zap.Logger
|
||||
// UnsafeNoFsync disables all uses of fsync.
|
||||
UnsafeNoFsync bool `json:"unsafe-no-fsync"`
|
||||
}
|
||||
|
||||
func DefaultBackendConfig() BackendConfig {
|
||||
@@ -150,6 +152,8 @@ func newBackend(bcfg BackendConfig) *backend {
|
||||
}
|
||||
bopts.InitialMmapSize = bcfg.mmapSize()
|
||||
bopts.FreelistType = bcfg.BackendFreelistType
|
||||
bopts.NoSync = bcfg.UnsafeNoFsync
|
||||
bopts.NoGrowSync = bcfg.UnsafeNoFsync
|
||||
|
||||
db, err := bolt.Open(bcfg.Path, 0600, bopts)
|
||||
if err != nil {
|
||||
@@ -350,6 +354,8 @@ func (b *backend) Defrag() error {
|
||||
|
||||
func (b *backend) defrag() error {
|
||||
now := time.Now()
|
||||
isDefragActive.Set(1)
|
||||
defer isDefragActive.Set(0)
|
||||
|
||||
// TODO: make this non-blocking?
|
||||
// lock batchTx to ensure nobody is using previous tx, and then
|
||||
|
@@ -83,6 +83,13 @@ var (
|
||||
// highest bucket start of 0.01 sec * 2^16 == 655.36 sec
|
||||
Buckets: prometheus.ExponentialBuckets(.01, 2, 17),
|
||||
})
|
||||
|
||||
isDefragActive = prometheus.NewGauge(prometheus.GaugeOpts{
|
||||
Namespace: "etcd",
|
||||
Subsystem: "disk",
|
||||
Name: "defrag_inflight",
|
||||
Help: "Whether or not defrag is active on the member. 1 means active, 0 means not.",
|
||||
})
|
||||
)
|
||||
|
||||
func init() {
|
||||
@@ -92,4 +99,5 @@ func init() {
|
||||
prometheus.MustRegister(writeSec)
|
||||
prometheus.MustRegister(defragSec)
|
||||
prometheus.MustRegister(snapshotTransferSec)
|
||||
prometheus.MustRegister(isDefragActive)
|
||||
}
|
||||
|
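The hunk above registers an etcd_disk_defrag_inflight gauge that is set to 1 for the duration of a defragmentation and back to 0 afterwards. As an illustrative sketch, not part of the diff, the gauge can be read from the standard metrics endpoint; the URL below is an assumption.

package main

import (
	"bufio"
	"fmt"
	"net/http"
	"strings"
)

func main() {
	// Assumed local metrics endpoint of an etcd member.
	resp, err := http.Get("http://localhost:2379/metrics")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	sc := bufio.NewScanner(resp.Body)
	for sc.Scan() {
		line := sc.Text()
		// "etcd_disk_defrag_inflight 1" means a defrag is currently running.
		if strings.HasPrefix(line, "etcd_disk_defrag_inflight") {
			fmt.Println(line)
		}
	}
	if err := sc.Err(); err != nil {
		panic(err)
	}
}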
@@ -182,8 +182,8 @@ func (tw *storeTxnWrite) put(key, value []byte, leaseID lease.LeaseID) {
|
||||
if err == nil {
|
||||
c = created.main
|
||||
oldLease = tw.s.le.GetLease(lease.LeaseItem{Key: string(key)})
|
||||
tw.trace.Step("get key's previous created_revision and leaseID")
|
||||
}
|
||||
tw.trace.Step("get key's previous created_revision and leaseID")
|
||||
ibytes := newRevBytes()
|
||||
idxRev := revision{main: rev, sub: int64(len(tw.changes))}
|
||||
revToBytes(idxRev, ibytes)
|
||||
|
@@ -30,9 +30,8 @@ import (
|
||||
var (
|
||||
// chanBufLen is the length of the buffered chan
|
||||
// for sending out watched events.
|
||||
// TODO: find a good buf value. 1024 is just a random one that
|
||||
// seems to be reasonable.
|
||||
chanBufLen = 1024
|
||||
// See https://github.com/etcd-io/etcd/issues/11906 for more detail.
|
||||
chanBufLen = 128
|
||||
|
||||
// maxWatchersPerSync is the number of watchers to sync in a single batch
|
||||
maxWatchersPerSync = 512
|
||||
|
@@ -18,5 +18,10 @@ package fileutil
|
||||
|
||||
import "os"
|
||||
|
||||
const (
|
||||
// PrivateDirMode grants owner to make/remove files inside the directory.
|
||||
PrivateDirMode = 0700
|
||||
)
|
||||
|
||||
// OpenDir opens a directory for syncing.
|
||||
func OpenDir(path string) (*os.File, error) { return os.Open(path) }
|
||||
|
@@ -21,6 +21,11 @@ import (
|
||||
"syscall"
|
||||
)
|
||||
|
||||
const (
|
||||
// PrivateDirMode grants owner to make/remove files inside the directory.
|
||||
PrivateDirMode = 0777
|
||||
)
|
||||
|
||||
// OpenDir opens a directory in windows with write access for syncing.
|
||||
func OpenDir(path string) (*os.File, error) {
|
||||
fd, err := openDir(path)
|
||||
|
@@ -27,8 +27,6 @@ import (
|
||||
const (
|
||||
// PrivateFileMode grants owner to read/write a file.
|
||||
PrivateFileMode = 0600
|
||||
// PrivateDirMode grants owner to make/remove files inside the directory.
|
||||
PrivateDirMode = 0700
|
||||
)
|
||||
|
||||
var plog = capnslog.NewPackageLogger("go.etcd.io/etcd", "pkg/fileutil")
|
||||
@@ -46,14 +44,22 @@ func IsDirWriteable(dir string) error {
|
||||
// TouchDirAll is similar to os.MkdirAll. It creates directories with 0700 permission if any directory
|
||||
// does not exists. TouchDirAll also ensures the given directory is writable.
|
||||
func TouchDirAll(dir string) error {
|
||||
// If path is already a directory, MkdirAll does nothing
|
||||
// and returns nil.
|
||||
err := os.MkdirAll(dir, PrivateDirMode)
|
||||
if err != nil {
|
||||
// if mkdirAll("a/text") and "text" is not
|
||||
// a directory, this will return syscall.ENOTDIR
|
||||
return err
|
||||
// If path is already a directory, MkdirAll does nothing and returns nil, so,
|
||||
// first check if dir exist with an expected permission mode.
|
||||
if Exist(dir) {
|
||||
err := CheckDirPermission(dir, PrivateDirMode)
|
||||
if err != nil {
|
||||
plog.Warningf("check file permission: %v", err)
|
||||
}
|
||||
} else {
|
||||
err := os.MkdirAll(dir, PrivateDirMode)
|
||||
if err != nil {
|
||||
// if mkdirAll("a/text") and "text" is not
|
||||
// a directory, this will return syscall.ENOTDIR
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return IsDirWriteable(dir)
|
||||
}
|
||||
|
||||
@@ -102,3 +108,22 @@ func ZeroToEnd(f *os.File) error {
|
||||
_, err = f.Seek(off, io.SeekStart)
|
||||
return err
|
||||
}
|
||||
|
||||
// CheckDirPermission checks permission on an existing dir.
|
||||
// Returns error if dir is empty or exist with a different permission than specified.
|
||||
func CheckDirPermission(dir string, perm os.FileMode) error {
|
||||
if !Exist(dir) {
|
||||
return fmt.Errorf("directory %q empty, cannot check permission.", dir)
|
||||
}
|
||||
//check the existing permission on the directory
|
||||
dirInfo, err := os.Stat(dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
dirMode := dirInfo.Mode().Perm()
|
||||
if dirMode != perm {
|
||||
err = fmt.Errorf("directory %q exist, but the permission is %q. The recommended permission is %q to prevent possible unprivileged access to the data.", dir, dirInfo.Mode(), os.FileMode(PrivateDirMode))
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
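With the change above, TouchDirAll warns when an existing directory does not carry the expected 0700 mode, and CheckDirPermission performs the actual comparison. A small usage sketch, assuming the fileutil package is imported from this repository and using a placeholder data directory:

package main

import (
	"fmt"

	"go.etcd.io/etcd/pkg/fileutil"
)

func main() {
	dir := "/var/lib/etcd" // assumed data directory

	// Create the directory with 0700 if it is missing; an existing directory
	// with a different mode only produces a warning in the server log.
	if err := fileutil.TouchDirAll(dir); err != nil {
		panic(err)
	}

	// Explicitly verify the mode; a mismatch is reported, not fixed.
	if err := fileutil.CheckDirPermission(dir, fileutil.PrivateDirMode); err != nil {
		fmt.Println("permission warning:", err)
	}
}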
@@ -148,3 +148,21 @@ func TestZeroToEnd(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDirPermission(t *testing.T) {
|
||||
tmpdir, err := ioutil.TempDir(os.TempDir(), "foo")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(tmpdir)
|
||||
|
||||
tmpdir2 := filepath.Join(tmpdir, "testpermission")
|
||||
// create a new dir with 0700
|
||||
if err = CreateDirAll(tmpdir2); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// check dir permission with mode different than created dir
|
||||
if err = CheckDirPermission(tmpdir2, 0600); err == nil {
|
||||
t.Errorf("expected error, got nil")
|
||||
}
|
||||
}
|
||||
|
@@ -21,19 +21,14 @@ import (
|
||||
"io"
|
||||
"os"
|
||||
"syscall"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// This used to call syscall.Flock() but that call fails with EBADF on NFS.
|
||||
// An alternative is lockf() which works on NFS but that call lets a process lock
|
||||
// the same file twice. Instead, use Linux's non-standard open file descriptor
|
||||
// locks which will block if the process already holds the file lock.
|
||||
//
|
||||
// constants from /usr/include/bits/fcntl-linux.h
|
||||
const (
|
||||
F_OFD_GETLK = 37
|
||||
F_OFD_SETLK = 37
|
||||
F_OFD_SETLKW = 38
|
||||
)
|
||||
|
||||
var (
|
||||
wrlck = syscall.Flock_t{
|
||||
@@ -50,7 +45,7 @@ var (
|
||||
func init() {
|
||||
// use open file descriptor locks if the system supports it
|
||||
getlk := syscall.Flock_t{Type: syscall.F_RDLCK}
|
||||
if err := syscall.FcntlFlock(0, F_OFD_GETLK, &getlk); err == nil {
|
||||
if err := syscall.FcntlFlock(0, unix.F_OFD_GETLK, &getlk); err == nil {
|
||||
linuxTryLockFile = ofdTryLockFile
|
||||
linuxLockFile = ofdLockFile
|
||||
}
|
||||
@@ -67,7 +62,7 @@ func ofdTryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error
|
||||
}
|
||||
|
||||
flock := wrlck
|
||||
if err = syscall.FcntlFlock(f.Fd(), F_OFD_SETLK, &flock); err != nil {
|
||||
if err = syscall.FcntlFlock(f.Fd(), unix.F_OFD_SETLK, &flock); err != nil {
|
||||
f.Close()
|
||||
if err == syscall.EWOULDBLOCK {
|
||||
err = ErrLocked
|
||||
@@ -88,7 +83,7 @@ func ofdLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
|
||||
}
|
||||
|
||||
flock := wrlck
|
||||
err = syscall.FcntlFlock(f.Fd(), F_OFD_SETLKW, &flock)
|
||||
err = syscall.FcntlFlock(f.Fd(), unix.F_OFD_SETLKW, &flock)
|
||||
if err != nil {
|
||||
f.Close()
|
||||
return nil, err
|
||||
|
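The change above replaces the hand-copied F_OFD_* constants with the ones exported by golang.org/x/sys/unix. A hedged, Linux-only sketch of the same pattern, taking a non-blocking open-file-description write lock on a placeholder path:

// +build linux

package main

import (
	"fmt"
	"io"
	"os"
	"syscall"

	"golang.org/x/sys/unix"
)

func main() {
	f, err := os.OpenFile("/tmp/example.lock", os.O_WRONLY|os.O_CREATE, 0600)
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Open file description (OFD) locks work on NFS, unlike flock, and
	// F_OFD_SETLK fails immediately instead of blocking when contended.
	lk := syscall.Flock_t{
		Type:   syscall.F_WRLCK,
		Whence: int16(io.SeekStart),
	}
	if err := syscall.FcntlFlock(f.Fd(), unix.F_OFD_SETLK, &lk); err != nil {
		if err == syscall.EWOULDBLOCK {
			fmt.Println("already locked by another process")
			return
		}
		panic(err)
	}
	fmt.Println("lock acquired")
}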
@@ -1,82 +0,0 @@
|
||||
// Copyright 2015 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package netutil
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os/exec"
|
||||
)
|
||||
|
||||
// DropPort drops all tcp packets that are received from the given port and sent to the given port.
|
||||
func DropPort(port int) error {
|
||||
cmdStr := fmt.Sprintf("sudo iptables -A OUTPUT -p tcp --destination-port %d -j DROP", port)
|
||||
if _, err := exec.Command("/bin/sh", "-c", cmdStr).Output(); err != nil {
|
||||
return err
|
||||
}
|
||||
cmdStr = fmt.Sprintf("sudo iptables -A INPUT -p tcp --destination-port %d -j DROP", port)
|
||||
_, err := exec.Command("/bin/sh", "-c", cmdStr).Output()
|
||||
return err
|
||||
}
|
||||
|
||||
// RecoverPort stops dropping tcp packets at given port.
|
||||
func RecoverPort(port int) error {
|
||||
cmdStr := fmt.Sprintf("sudo iptables -D OUTPUT -p tcp --destination-port %d -j DROP", port)
|
||||
if _, err := exec.Command("/bin/sh", "-c", cmdStr).Output(); err != nil {
|
||||
return err
|
||||
}
|
||||
cmdStr = fmt.Sprintf("sudo iptables -D INPUT -p tcp --destination-port %d -j DROP", port)
|
||||
_, err := exec.Command("/bin/sh", "-c", cmdStr).Output()
|
||||
return err
|
||||
}
|
||||
|
||||
// SetLatency adds latency in millisecond scale with random variations.
|
||||
func SetLatency(ms, rv int) error {
|
||||
ifces, err := GetDefaultInterfaces()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if rv > ms {
|
||||
rv = 1
|
||||
}
|
||||
for ifce := range ifces {
|
||||
cmdStr := fmt.Sprintf("sudo tc qdisc add dev %s root netem delay %dms %dms distribution normal", ifce, ms, rv)
|
||||
_, err = exec.Command("/bin/sh", "-c", cmdStr).Output()
|
||||
if err != nil {
|
||||
// the rule has already been added. Overwrite it.
|
||||
cmdStr = fmt.Sprintf("sudo tc qdisc change dev %s root netem delay %dms %dms distribution normal", ifce, ms, rv)
|
||||
_, err = exec.Command("/bin/sh", "-c", cmdStr).Output()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// RemoveLatency resets latency configurations.
|
||||
func RemoveLatency() error {
|
||||
ifces, err := GetDefaultInterfaces()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for ifce := range ifces {
|
||||
_, err = exec.Command("/bin/sh", "-c", fmt.Sprintf("sudo tc qdisc del dev %s root netem", ifce)).Output()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
@@ -1,25 +0,0 @@
|
||||
// Copyright 2015 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build !linux
|
||||
|
||||
package netutil
|
||||
|
||||
func DropPort(port int) error { return nil }
|
||||
|
||||
func RecoverPort(port int) error { return nil }
|
||||
|
||||
func SetLatency(ms, rv int) error { return nil }
|
||||
|
||||
func RemoveLatency() error { return nil }
|
@@ -16,7 +16,7 @@
|
||||
package runtime
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
@@ -29,9 +29,20 @@ func FDLimit() (uint64, error) {
|
||||
}
|
||||
|
||||
func FDUsage() (uint64, error) {
|
||||
fds, err := ioutil.ReadDir("/proc/self/fd")
|
||||
return countFiles("/proc/self/fd")
|
||||
}
|
||||
|
||||
// countFiles reads the directory named by dirname and returns the count.
|
||||
// This is same as stdlib "io/ioutil.ReadDir" but without sorting.
|
||||
func countFiles(dirname string) (uint64, error) {
|
||||
f, err := os.Open(dirname)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return uint64(len(fds)), nil
|
||||
list, err := f.Readdir(-1)
|
||||
f.Close()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return uint64(len(list)), nil
|
||||
}
|
||||
|
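FDUsage now counts entries in /proc/self/fd directly, skipping the sort that ioutil.ReadDir would perform. A brief usage sketch, assuming the pkg/runtime package from this repository; the 80% threshold is purely illustrative.

package main

import (
	"fmt"

	"go.etcd.io/etcd/pkg/runtime"
)

func main() {
	used, err := runtime.FDUsage()
	if err != nil {
		panic(err)
	}
	limit, err := runtime.FDLimit()
	if err != nil {
		panic(err)
	}
	fmt.Printf("file descriptors: %d used of %d\n", used, limit)
	if limit > 0 && used*100/limit > 80 {
		fmt.Println("warning: fd usage above 80%")
	}
}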
@@ -118,6 +118,7 @@ func interestingGoroutines() (gs []string) {
|
||||
}
|
||||
stack := strings.TrimSpace(sl[1])
|
||||
if stack == "" ||
|
||||
strings.Contains(stack, "created by testing.runTests.func1") ||
|
||||
strings.Contains(stack, "sync.(*WaitGroup).Done") ||
|
||||
strings.Contains(stack, "os.(*file).close") ||
|
||||
strings.Contains(stack, "created by os/signal.init") ||
|
||||
|
@@ -1,3 +1,5 @@
|
||||
// +build !go1.15
|
||||
|
||||
// Copyright 2018 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
|
pkg/tlsutil/cipher_suites_go1.15.go (new file, 41 lines)
@@ -0,0 +1,41 @@
|
||||
// +build go1.15
|
||||
|
||||
// Copyright 2018 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package tlsutil
|
||||
|
||||
import "crypto/tls"
|
||||
|
||||
// GetCipherSuite returns the corresponding cipher suite,
|
||||
// and boolean value if it is supported.
|
||||
func GetCipherSuite(s string) (uint16, bool) {
|
||||
for _, c := range tls.CipherSuites() {
|
||||
if s == c.Name {
|
||||
return c.ID, true
|
||||
}
|
||||
}
|
||||
for _, c := range tls.InsecureCipherSuites() {
|
||||
if s == c.Name {
|
||||
return c.ID, true
|
||||
}
|
||||
}
|
||||
switch s {
|
||||
case "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305":
|
||||
return tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, true
|
||||
case "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305":
|
||||
return tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, true
|
||||
}
|
||||
return 0, false
|
||||
}
|
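On Go 1.15 and later, the lookup above resolves a cipher-suite name through crypto/tls, plus two legacy ChaCha20 aliases. A small sketch, not part of the diff, of turning a configured list of names into tls.Config cipher IDs; the names in the slice are examples and the tlsutil import path assumes this repository.

package main

import (
	"crypto/tls"
	"fmt"

	"go.etcd.io/etcd/pkg/tlsutil"
)

func main() {
	names := []string{
		"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
		"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", // legacy name, still resolved
	}

	var ids []uint16
	for _, n := range names {
		id, ok := tlsutil.GetCipherSuite(n)
		if !ok {
			panic(fmt.Sprintf("unsupported cipher suite %q", n))
		}
		ids = append(ids, id)
	}

	// Cipher suites are only configurable up to TLS 1.2.
	cfg := &tls.Config{
		MaxVersion:   tls.VersionTLS12,
		CipherSuites: ids,
	}
	fmt.Printf("configured %d cipher suites: %v\n", len(cfg.CipherSuites), cfg.CipherSuites)
}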
pkg/tlsutil/cipher_suites_go1.15_test.go (new file, 52 lines)
@@ -0,0 +1,52 @@
|
||||
// +build go1.15
|
||||
|
||||
// Copyright 2018 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package tlsutil
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestGetCipherSuite_not_existing(t *testing.T) {
|
||||
_, ok := GetCipherSuite("not_existing")
|
||||
if ok {
|
||||
t.Fatal("Expected not ok")
|
||||
}
|
||||
}
|
||||
|
||||
func CipherSuiteExpectedToExist(tb testing.TB, cipher string, expectedId uint16) {
|
||||
vid, ok := GetCipherSuite(cipher)
|
||||
if !ok {
|
||||
tb.Errorf("Expected %v cipher to exist", cipher)
|
||||
}
|
||||
if vid != expectedId {
|
||||
tb.Errorf("For %v expected=%v found=%v", cipher, expectedId, vid)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetCipherSuite_success(t *testing.T) {
|
||||
CipherSuiteExpectedToExist(t, "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA)
|
||||
CipherSuiteExpectedToExist(t, "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256)
|
||||
|
||||
// Explicit test for legacy names
|
||||
CipherSuiteExpectedToExist(t, "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256)
|
||||
CipherSuiteExpectedToExist(t, "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256)
|
||||
}
|
||||
|
||||
func TestGetCipherSuite_insecure(t *testing.T) {
|
||||
CipherSuiteExpectedToExist(t, "TLS_ECDHE_RSA_WITH_RC4_128_SHA", tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA)
|
||||
}
|
@@ -1,3 +1,5 @@
|
||||
// +build !go1.15
|
||||
|
||||
// Copyright 2018 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
|
@@ -31,6 +31,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"go.etcd.io/etcd/pkg/fileutil"
|
||||
"go.etcd.io/etcd/pkg/tlsutil"
|
||||
|
||||
"go.uber.org/zap"
|
||||
@@ -114,10 +115,17 @@ func (info TLSInfo) Empty() bool {
|
||||
}
|
||||
|
||||
func SelfCert(lg *zap.Logger, dirpath string, hosts []string, additionalUsages ...x509.ExtKeyUsage) (info TLSInfo, err error) {
|
||||
if err = os.MkdirAll(dirpath, 0700); err != nil {
|
||||
info.Logger = lg
|
||||
err = fileutil.TouchDirAll(dirpath)
|
||||
if err != nil {
|
||||
if info.Logger != nil {
|
||||
info.Logger.Warn(
|
||||
"cannot create cert directory",
|
||||
zap.Error(err),
|
||||
)
|
||||
}
|
||||
return
|
||||
}
|
||||
info.Logger = lg
|
||||
|
||||
certPath := filepath.Join(dirpath, "cert.pem")
|
||||
keyPath := filepath.Join(dirpath, "key.pem")
|
||||
|
@@ -26,7 +26,7 @@ import (
|
||||
|
||||
// HandleHealth registers health handler on '/health'.
|
||||
func HandleHealth(mux *http.ServeMux, c *clientv3.Client) {
|
||||
mux.Handle(etcdhttp.PathHealth, etcdhttp.NewHealthHandler(func() etcdhttp.Health { return checkHealth(c) }))
|
||||
mux.Handle(etcdhttp.PathHealth, etcdhttp.NewHealthHandler(func(excludedAlarms etcdhttp.AlarmSet) etcdhttp.Health { return checkHealth(c) }))
|
||||
}
|
||||
|
||||
func checkHealth(c *clientv3.Client) etcdhttp.Health {
|
||||
|
@@ -62,8 +62,10 @@ func (a *rawNodeAdapter) ReadIndex(_ context.Context, rctx []byte) error {
|
||||
// RawNode swallowed the error in ReadIndex, it probably should not do that.
|
||||
return nil
|
||||
}
|
||||
func (a *rawNodeAdapter) Step(_ context.Context, m pb.Message) error { return a.RawNode.Step(m) }
|
||||
func (a *rawNodeAdapter) Propose(_ context.Context, data []byte) error { return a.RawNode.Propose(data) }
|
||||
func (a *rawNodeAdapter) Step(_ context.Context, m pb.Message) error { return a.RawNode.Step(m) }
|
||||
func (a *rawNodeAdapter) Propose(_ context.Context, data []byte) error {
|
||||
return a.RawNode.Propose(data)
|
||||
}
|
||||
func (a *rawNodeAdapter) ProposeConfChange(_ context.Context, cc pb.ConfChangeI) error {
|
||||
return a.RawNode.ProposeConfChange(cc)
|
||||
}
|
||||
|
test (11 changed lines)
@@ -120,8 +120,14 @@ fi
|
||||
echo "Running with TEST_CPUS:" "${TEST_CPUS}"
|
||||
|
||||
# determine whether target supports race detection
|
||||
if [ "$GOARCH" == "amd64" ]; then
|
||||
RACE="--race"
|
||||
if [ -z "${RACE}" ] ; then
|
||||
if [ "$GOARCH" == "amd64" ]; then
|
||||
RACE="--race"
|
||||
else
|
||||
RACE="--race=false"
|
||||
fi
|
||||
else
|
||||
RACE="--race=${RACE:-true}"
|
||||
fi
|
||||
|
||||
RUN_ARG=""
|
||||
@@ -462,6 +468,7 @@ function govet_shadow_pass {
|
||||
# Golang 1.12 onwards the experimental -shadow option is no longer available with go vet
|
||||
go get golang.org/x/tools/go/analysis/passes/shadow/cmd/shadow
|
||||
export PATH=${GOPATH}/bin:${PATH}
|
||||
# shellcheck disable=SC2230
|
||||
shadow_tool=$(which shadow)
|
||||
vetRes=$(go vet -all -vettool="${shadow_tool}" "${TEST[@]}")
|
||||
if [ -n "${vetRes}" ]; then
|
||||
|
@@ -44,9 +44,11 @@ func TestCtlV3AuthFromKeyPerm(t *testing.T) { testCtl(t, authTestFromKeyPer
|
||||
func TestCtlV3AuthAndWatch(t *testing.T) { testCtl(t, authTestWatch) }
|
||||
func TestCtlV3AuthAndWatchJWT(t *testing.T) { testCtl(t, authTestWatch, withCfg(configJWT)) }
|
||||
|
||||
func TestCtlV3AuthLeaseTestKeepAlive(t *testing.T) { testCtl(t, authLeaseTestKeepAlive) }
|
||||
func TestCtlV3AuthLeaseTestTimeToLiveExpired(t *testing.T) { testCtl(t, authLeaseTestTimeToLiveExpired) }
|
||||
func TestCtlV3AuthLeaseGrantLeases(t *testing.T) { testCtl(t, authLeaseTestLeaseGrantLeases) }
|
||||
func TestCtlV3AuthLeaseTestKeepAlive(t *testing.T) { testCtl(t, authLeaseTestKeepAlive) }
|
||||
func TestCtlV3AuthLeaseTestTimeToLiveExpired(t *testing.T) {
|
||||
testCtl(t, authLeaseTestTimeToLiveExpired)
|
||||
}
|
||||
func TestCtlV3AuthLeaseGrantLeases(t *testing.T) { testCtl(t, authLeaseTestLeaseGrantLeases) }
|
||||
func TestCtlV3AuthLeaseGrantLeasesJWT(t *testing.T) {
|
||||
testCtl(t, authLeaseTestLeaseGrantLeases, withCfg(configJWT))
|
||||
}
|
||||
|
@@ -49,6 +49,7 @@ func metricsTest(cx ctlCtx) {
|
||||
{"/metrics", fmt.Sprintf("etcd_mvcc_delete_total 3")},
|
||||
{"/metrics", fmt.Sprintf(`etcd_server_version{server_version="%s"} 1`, version.Version)},
|
||||
{"/metrics", fmt.Sprintf(`etcd_cluster_version{cluster_version="%s"} 1`, version.Cluster(version.Version))},
|
||||
{"/metrics", fmt.Sprintf(`grpc_server_handled_total{grpc_code="Canceled",grpc_method="Watch",grpc_service="etcdserverpb.Watch",grpc_type="bidi_stream"} 6`)},
|
||||
{"/health", `{"health":"true"}`},
|
||||
} {
|
||||
i++
|
||||
@@ -58,7 +59,9 @@ func metricsTest(cx ctlCtx) {
|
||||
if err := ctlV3Del(cx, []string{fmt.Sprintf("%d", i)}, 1); err != nil {
|
||||
cx.t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := ctlV3Watch(cx, []string{"k", "--rev", "1"}, []kvExec{{key: "k", val: "v"}}...); err != nil {
|
||||
cx.t.Fatal(err)
|
||||
}
|
||||
if err := cURLGet(cx.epc, cURLReq{endpoint: test.endpoint, expected: test.expected, metricsURLScheme: cx.cfg.metricsURLScheme}); err != nil {
|
||||
cx.t.Fatalf("failed get with curl (%v)", err)
|
||||
}
|
||||
|
@@ -62,12 +62,22 @@ func setupEmbedCfg(cfg *embed.Config, curls, purls, ics []url.URL) {
|
||||
cfg.InitialCluster = cfg.InitialCluster[1:]
|
||||
}
|
||||
|
||||
func getCommand(exec, name, dir, cURL, pURL, cluster string) string {
|
||||
s := fmt.Sprintf("%s --name %s --data-dir %s --listen-client-urls %s --advertise-client-urls %s ",
|
||||
exec, name, dir, cURL, cURL)
|
||||
s += fmt.Sprintf("--listen-peer-urls %s --initial-advertise-peer-urls %s ", pURL, pURL)
|
||||
s += fmt.Sprintf("--initial-cluster %s ", cluster)
|
||||
return s + "--initial-cluster-token tkn --initial-cluster-state new"
|
||||
func getCommand(exec, name, dir, cURL, pURL, cluster string) (args []string) {
|
||||
if !strings.Contains(exec, "etcd") {
|
||||
panic(fmt.Errorf("%q doesn't seem like etcd binary", exec))
|
||||
}
|
||||
return []string{
|
||||
exec,
|
||||
"--name", name,
|
||||
"--data-dir", dir,
|
||||
"--listen-client-urls", cURL,
|
||||
"--advertise-client-urls", cURL,
|
||||
"--listen-peer-urls", pURL,
|
||||
"--initial-advertise-peer-urls", pURL,
|
||||
"--initial-cluster", cluster,
|
||||
"--initial-cluster-token=tkn",
|
||||
"--initial-cluster-state=new",
|
||||
}
|
||||
}
|
||||
|
||||
func write(ep string) {
|
||||
|
@@ -47,7 +47,8 @@ func install(ver, dir string) (string, error) {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if err = exec.Command("bash", "-c", fmt.Sprintf("tar xzvf %s -C %s --strip-components=1", tarPath, dir)).Run(); err != nil {
|
||||
// parametrizes to prevent attackers from adding arbitrary OS commands
|
||||
if err = exec.Command("tar", "xzvf", tarPath, "-C", dir, "--strip-components=1").Run(); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return filepath.Join(dir, "etcd"), nil
|
||||
|
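Both hunks above replace "bash -c" with an explicit argument vector, so attacker-controlled strings in a path or URL are never interpreted by a shell. A minimal illustration of the difference, not taken from the diff; the paths are placeholders.

package main

import "os/exec"

func main() {
	tarPath, dir := "/tmp/etcd.tar.gz", "/tmp/etcd"

	// Risky: the whole string goes through a shell, so metacharacters in
	// tarPath or dir (e.g. "; rm -rf ~") would be executed.
	_ = exec.Command("bash", "-c", "tar xzvf "+tarPath+" -C "+dir+" --strip-components=1")

	// Safer: each value is passed as a single argv entry and is never
	// parsed by a shell.
	cmd := exec.Command("tar", "xzvf", tarPath, "-C", dir, "--strip-components=1")
	if err := cmd.Run(); err != nil {
		panic(err)
	}
}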
@@ -87,7 +87,7 @@ func main() {
|
||||
rc := make(chan run)
|
||||
|
||||
cs1 := getCommand(bp, "s1", d1, "http://localhost:2379", "http://localhost:2380", cluster)
|
||||
cmd1 := exec.Command("bash", "-c", cs1)
|
||||
cmd1 := exec.Command(cs1[0], cs1[1:]...)
|
||||
go func() {
|
||||
if *debug {
|
||||
cmd1.Stderr = os.Stderr
|
||||
@@ -101,7 +101,7 @@ func main() {
|
||||
rc <- run{cmd: cmd1}
|
||||
}()
|
||||
cs2 := getCommand(bp, "s2", d2, "http://localhost:22379", "http://localhost:22380", cluster)
|
||||
cmd2 := exec.Command("bash", "-c", cs2)
|
||||
cmd2 := exec.Command(cs2[0], cs2[1:]...)
|
||||
go func() {
|
||||
if *debug {
|
||||
cmd2.Stderr = os.Stderr
|
||||
|
vendor/github.com/creack/pty/run.go (generated, vendored, 41 changed lines)
@@ -11,6 +11,8 @@ import (
|
||||
// Start assigns a pseudo-terminal tty os.File to c.Stdin, c.Stdout,
|
||||
// and c.Stderr, calls c.Start, and returns the File of the tty's
|
||||
// corresponding pty.
|
||||
//
|
||||
// Starts the process in a new session and sets the controlling terminal.
|
||||
func Start(c *exec.Cmd) (pty *os.File, err error) {
|
||||
return StartWithSize(c, nil)
|
||||
}
|
||||
@@ -19,16 +21,35 @@ func Start(c *exec.Cmd) (pty *os.File, err error) {
|
||||
// and c.Stderr, calls c.Start, and returns the File of the tty's
|
||||
// corresponding pty.
|
||||
//
|
||||
// This will resize the pty to the specified size before starting the command
|
||||
// This will resize the pty to the specified size before starting the command.
|
||||
// Starts the process in a new session and sets the controlling terminal.
|
||||
func StartWithSize(c *exec.Cmd, sz *Winsize) (pty *os.File, err error) {
|
||||
if c.SysProcAttr == nil {
|
||||
c.SysProcAttr = &syscall.SysProcAttr{}
|
||||
}
|
||||
c.SysProcAttr.Setsid = true
|
||||
c.SysProcAttr.Setctty = true
|
||||
return StartWithAttrs(c, sz, c.SysProcAttr)
|
||||
}
|
||||
|
||||
// StartWithAttrs assigns a pseudo-terminal tty os.File to c.Stdin, c.Stdout,
|
||||
// and c.Stderr, calls c.Start, and returns the File of the tty's
|
||||
// corresponding pty.
|
||||
//
|
||||
// This will resize the pty to the specified size before starting the command if a size is provided.
|
||||
// The `attrs` parameter overrides the one set in c.SysProcAttr.
|
||||
//
|
||||
// This should generally not be needed. Used in some edge cases where it is needed to create a pty
|
||||
// without a controlling terminal.
|
||||
func StartWithAttrs(c *exec.Cmd, sz *Winsize, attrs *syscall.SysProcAttr) (pty *os.File, err error) {
|
||||
pty, tty, err := Open()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer tty.Close()
|
||||
|
||||
if sz != nil {
|
||||
err = Setsize(pty, sz)
|
||||
if err != nil {
|
||||
if err := Setsize(pty, sz); err != nil {
|
||||
pty.Close()
|
||||
return nil, err
|
||||
}
|
||||
@@ -42,15 +63,11 @@ func StartWithSize(c *exec.Cmd, sz *Winsize) (pty *os.File, err error) {
|
||||
if c.Stdin == nil {
|
||||
c.Stdin = tty
|
||||
}
|
||||
if c.SysProcAttr == nil {
|
||||
c.SysProcAttr = &syscall.SysProcAttr{}
|
||||
}
|
||||
c.SysProcAttr.Setctty = true
|
||||
c.SysProcAttr.Setsid = true
|
||||
c.SysProcAttr.Ctty = int(tty.Fd())
|
||||
err = c.Start()
|
||||
if err != nil {
|
||||
pty.Close()
|
||||
|
||||
c.SysProcAttr = attrs
|
||||
|
||||
if err := c.Start(); err != nil {
|
||||
_ = pty.Close()
|
||||
return nil, err
|
||||
}
|
||||
return pty, err
|
||||
|
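The vendored creack/pty update splits pty setup into StartWithSize and StartWithAttrs. A hedged usage sketch of the high-level entry point; the command being run is arbitrary.

package main

import (
	"io"
	"os"
	"os/exec"

	"github.com/creack/pty"
)

func main() {
	// Run a command with its stdio attached to a fresh pseudo-terminal.
	cmd := exec.Command("ls", "-l")
	f, err := pty.Start(cmd)
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Everything the child writes to the tty can be read from the pty side.
	_, _ = io.Copy(os.Stdout, f)
	_ = cmd.Wait()
}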
vendor/github.com/creack/pty/ztypes_freebsd_arm64.go (generated, vendored, new file, 13 lines)
@@ -0,0 +1,13 @@
|
||||
// Code generated by cmd/cgo -godefs; DO NOT EDIT.
|
||||
// cgo -godefs types_freebsd.go
|
||||
|
||||
package pty
|
||||
|
||||
const (
|
||||
_C_SPECNAMELEN = 0xff
|
||||
)
|
||||
|
||||
type fiodgnameArg struct {
|
||||
Len int32
|
||||
Buf *byte
|
||||
}
|
@@ -1,5 +1,5 @@
|
||||
// Created by cgo -godefs - DO NOT EDIT
|
||||
// cgo -godefs types_openbsd.go
|
||||
// +build openbsd
|
||||
// +build 386 amd64 arm arm64
|
||||
|
||||
package pty
|
||||
|
vendor/github.com/creack/pty/ztypes_openbsd_amd64.go (generated, vendored, 13 lines deleted)
@@ -1,13 +0,0 @@
|
||||
// Created by cgo -godefs - DO NOT EDIT
|
||||
// cgo -godefs types_openbsd.go
|
||||
|
||||
package pty
|
||||
|
||||
type ptmget struct {
|
||||
Cfd int32
|
||||
Sfd int32
|
||||
Cn [16]int8
|
||||
Sn [16]int8
|
||||
}
|
||||
|
||||
var ioctl_PTMGET = 0x40287401
|
@@ -1,4 +1,5 @@
|
||||
Copyright (c) 2012 Dave Grijalva
|
||||
Copyright (c) 2021 golang-jwt maintainers
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
@@ -35,18 +35,18 @@ func (c StandardClaims) Valid() error {
|
||||
|
||||
// The claims below are optional, by default, so if they are set to the
|
||||
// default value in Go, let's not fail the verification for them.
|
||||
if c.VerifyExpiresAt(now, false) == false {
|
||||
if !c.VerifyExpiresAt(now, false) {
|
||||
delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0))
|
||||
vErr.Inner = fmt.Errorf("token is expired by %v", delta)
|
||||
vErr.Errors |= ValidationErrorExpired
|
||||
}
|
||||
|
||||
if c.VerifyIssuedAt(now, false) == false {
|
||||
if !c.VerifyIssuedAt(now, false) {
|
||||
vErr.Inner = fmt.Errorf("Token used before issued")
|
||||
vErr.Errors |= ValidationErrorIssuedAt
|
||||
}
|
||||
|
||||
if c.VerifyNotBefore(now, false) == false {
|
||||
if !c.VerifyNotBefore(now, false) {
|
||||
vErr.Inner = fmt.Errorf("token is not valid yet")
|
||||
vErr.Errors |= ValidationErrorNotValidYet
|
||||
}
|
||||
@@ -61,7 +61,7 @@ func (c StandardClaims) Valid() error {
|
||||
// Compares the aud claim against cmp.
|
||||
// If required is false, this method will return true if the value matches or is unset
|
||||
func (c *StandardClaims) VerifyAudience(cmp string, req bool) bool {
|
||||
return verifyAud(c.Audience, cmp, req)
|
||||
return verifyAud([]string{c.Audience}, cmp, req)
|
||||
}
|
||||
|
||||
// Compares the exp claim against cmp.
|
||||
@@ -90,15 +90,27 @@ func (c *StandardClaims) VerifyNotBefore(cmp int64, req bool) bool {
|
||||
|
||||
// ----- helpers
|
||||
|
||||
func verifyAud(aud string, cmp string, required bool) bool {
|
||||
if aud == "" {
|
||||
func verifyAud(aud []string, cmp string, required bool) bool {
|
||||
if len(aud) == 0 {
|
||||
return !required
|
||||
}
|
||||
if subtle.ConstantTimeCompare([]byte(aud), []byte(cmp)) != 0 {
|
||||
return true
|
||||
} else {
|
||||
return false
|
||||
// use a var here to keep constant time compare when looping over a number of claims
|
||||
result := false
|
||||
|
||||
var stringClaims string
|
||||
for _, a := range aud {
|
||||
if subtle.ConstantTimeCompare([]byte(a), []byte(cmp)) != 0 {
|
||||
result = true
|
||||
}
|
||||
stringClaims = stringClaims + a
|
||||
}
|
||||
|
||||
// case where "" is sent in one or many aud claims
|
||||
if len(stringClaims) == 0 {
|
||||
return !required
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
func verifyExp(exp int64, now int64, required bool) bool {
|
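The golang-jwt change above generalizes verifyAud to a slice of audiences while still touching every candidate, so the comparison time does not depend on which entry matches. A standalone sketch of the same constant-time membership pattern using only the standard library; the claim values are illustrative.

package main

import (
	"crypto/subtle"
	"fmt"
)

// containsConstantTime reports whether want is present in values, comparing
// every element so the timing does not reveal the matching position.
func containsConstantTime(values []string, want string) bool {
	found := false
	for _, v := range values {
		if subtle.ConstantTimeCompare([]byte(v), []byte(want)) == 1 {
			found = true
		}
	}
	return found
}

func main() {
	aud := []string{"api.example.com", "admin.example.com"} // example aud claims
	fmt.Println(containsConstantTime(aud, "api.example.com"))  // true
	fmt.Println(containsConstantTime(aud, "evil.example.com")) // false
}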
vendor/github.com/dgrijalva/jwt-go/doc.go → vendor/github.com/golang-jwt/jwt/doc.go (generated, vendored, renamed without changes)
@@ -88,11 +88,11 @@ func (m *SigningMethodECDSA) Verify(signingString, signature string, key interfa
|
||||
hasher.Write([]byte(signingString))
|
||||
|
||||
// Verify the signature
|
||||
if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus == true {
|
||||
if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus {
|
||||
return nil
|
||||
} else {
|
||||
return ErrECDSAVerification
|
||||
}
|
||||
|
||||
return ErrECDSAVerification
|
||||
}
|
||||
|
||||
// Implements the Sign method from SigningMethod
|
Some files were not shown because too many files have changed in this diff.