Compare commits

91 Commits:

960f4604bc, 22b67da920, 4b53ab0909, b32ec69f9b, 3ab9894b04, 00d6d4aba7, 88b5e22b73, 2bb8278fbf,
935c76b8c3, e83f50ec7c, 232a81d804, 6ffc32cd06, f8aeb21c2d, 0520cb9304, 424e4ae1cc, a631a80a39,
fc08fd75ee, 0f4a535c2f, c765bef483, 5586a5806e, d267ca9c18, 4176fe768f, 950c846144, 0b78d66abe,
2d58079626, be171fa424, 4b60243fc5, 2c5d79f49f, 424abca6ac, 43b75072bf, 78141fae60, 3be37f042e,
7c896098d2, 30f4e36de4, 557abbe437, 4b448c209b, e5b7ee2d03, a4c5731c38, 1f558ae678, df93627bbb,
a20295c65b, 9f7bb0df3a, 6a805e5222, 38f79fa565, 37a502cc88, 9be7fc5320, 288bccd288, 8cb5b48f58,
6538217528, e983d6b343, 20490caaf0, e156746959, d84bf983cc, b44c6bff9d, 8c3c1b4a9c, b478387a59,
dfc1f21f9d, 41e52ebc22, 7bb538d4d4, 1622782e49, 99b47e0c1e, 350d0cd211, 72f37ff79a, 3221454cab,
4a1bffdbc6, 9d9be2bc86, e5462f74f1, c68c1d9344, 6ed56cd723, a3c6f6bf81, 21fdcc6443, 8d122e7011,
ade1d97893, 1300189581, 1971517806, d614bb0799, 059dc91d4c, 5fdbaee761, 714e7ec8db, 2cdaf6d661,
77a51e0dbf, d96d3aa0ed, 66e7532f57, 3eff360e79, 1487071966, 5d62bba9c7, 114e293119, 1439955536,
2c8ecc7e13, 7b4d622a7e, db8abbd975
.semaphore.sh — new executable file (16 lines added, @@ -0,0 +1,16 @@):

```sh
#!/usr/bin/env bash

TEST_SUFFIX=$(date +%s | base64 | head -c 15)

TEST_OPTS="RELEASE_TEST=y INTEGRATION=y PASSES='build unit release integration_e2e functional' MANUAL_VER=v3.1.10"
if [ "$TEST_ARCH" == "386" ]; then
  TEST_OPTS="GOARCH=386 PASSES='build unit integration_e2e'"
fi

docker run \
  --rm \
  --volume=`pwd`:/go/src/github.com/coreos/etcd \
  gcr.io/etcd-development/etcd-test:go1.8.5 \
  /bin/bash -c "${TEST_OPTS} ./test 2>&1 | tee test-${TEST_SUFFIX}.log"

! egrep "(--- FAIL:|leak)" -A10 -B50 test-${TEST_SUFFIX}.log
```
.travis.yml (14 changes):

```diff
@@ -4,7 +4,7 @@ go_import_path: github.com/coreos/etcd
 sudo: false
 
 go:
-- 1.7.5
+- 1.8.5
 
 notifications:
   on_success: never
@@ -32,18 +32,6 @@ matrix:
   - go: tip
     env: TARGET=ppc64le
 
-addons:
-  apt:
-    packages:
-    - libpcap-dev
-    - libaspell-dev
-    - libhunspell-dev
-
-before_install:
- - go get -v github.com/chzchzchz/goword
- - go get -v honnef.co/go/simple/cmd/gosimple
- - go get -v honnef.co/go/unused/cmd/unused
-
 # disable godep restore override
 install:
  - pushd cmd/etcd && go get -t -v ./... && popd
```
```diff
@@ -5,6 +5,12 @@ ADD etcdctl /usr/local/bin/
 RUN mkdir -p /var/etcd/
 RUN mkdir -p /var/lib/etcd/
 
+# Alpine Linux doesn't use pam, which means that there is no /etc/nsswitch.conf,
+# but Golang relies on /etc/nsswitch.conf to check the order of DNS resolving
+# (see https://github.com/golang/go/commit/9dee7771f561cf6aee081c0af6658cc81fac3918)
+# To fix this we just create /etc/nsswitch.conf and add the following line:
+RUN echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf
+
 EXPOSE 2379 2380
 
 # Define default command.
```
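An aside, not part of the changeset: the behavior the comment above describes can be observed from Go directly, since the pure-Go resolver is what consults /etc/nsswitch.conf on Linux. A minimal sketch (the host name is a placeholder):

```go
package main

import (
	"context"
	"fmt"
	"net"
)

func main() {
	// PreferGo forces the pure-Go resolver, whose lookup order on Linux
	// is influenced by /etc/nsswitch.conf (absent on stock Alpine).
	r := &net.Resolver{PreferGo: true}
	addrs, err := r.LookupHost(context.Background(), "example.com")
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	fmt.Println(addrs)
}
```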
Dockerfile-test — new file (57 lines added, @@ -0,0 +1,57 @@):

```dockerfile
FROM ubuntu:16.10

RUN rm /bin/sh && ln -s /bin/bash /bin/sh
RUN echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections

RUN apt-get -y update \
  && apt-get -y install \
  build-essential \
  gcc \
  apt-utils \
  pkg-config \
  software-properties-common \
  apt-transport-https \
  libssl-dev \
  sudo \
  bash \
  curl \
  wget \
  tar \
  git \
  netcat \
  libaspell-dev \
  libhunspell-dev \
  hunspell-en-us \
  aspell-en \
  shellcheck \
  && apt-get -y update \
  && apt-get -y upgrade \
  && apt-get -y autoremove \
  && apt-get -y autoclean

ENV GOROOT /usr/local/go
ENV GOPATH /go
ENV PATH ${GOPATH}/bin:${GOROOT}/bin:${PATH}
ENV GO_VERSION REPLACE_ME_GO_VERSION
ENV GO_DOWNLOAD_URL https://storage.googleapis.com/golang
RUN rm -rf ${GOROOT} \
  && curl -s ${GO_DOWNLOAD_URL}/go${GO_VERSION}.linux-amd64.tar.gz | tar -v -C /usr/local/ -xz \
  && mkdir -p ${GOPATH}/src ${GOPATH}/bin \
  && go version

RUN mkdir -p ${GOPATH}/src/github.com/coreos/etcd
WORKDIR ${GOPATH}/src/github.com/coreos/etcd

ADD ./scripts/install-marker.sh /tmp/install-marker.sh

RUN go get -v -u -tags spell github.com/chzchzchz/goword \
  && go get -v -u github.com/coreos/license-bill-of-materials \
  && go get -v -u honnef.co/go/tools/cmd/gosimple \
  && go get -v -u honnef.co/go/tools/cmd/unused \
  && go get -v -u honnef.co/go/tools/cmd/staticcheck \
  && go get -v -u github.com/wadey/gocovmerge \
  && go get -v -u github.com/gordonklaus/ineffassign \
  && /tmp/install-marker.sh amd64 \
  && rm -f /tmp/install-marker.sh \
  && curl -s https://codecov.io/bash >/codecov \
  && chmod 700 /codecov
```
```diff
@@ -49,4 +49,4 @@ Bootstrap another machine and use the [hey HTTP benchmark tool][hey] to send req
 | 256 | 256 | all servers | 3061 | 119.3 |
 
 [hey]: https://github.com/rakyll/hey
-[hack-benchmark]: /hack/benchmark/
+[hack-benchmark]: https://github.com/coreos/etcd/tree/master/hack/benchmark
```
```diff
@@ -69,4 +69,4 @@ Bootstrap another machine and use the [hey HTTP benchmark tool][hey] to send req
 [hey]: https://github.com/rakyll/hey
 [c7146bd5]: https://github.com/coreos/etcd/commits/c7146bd5f2c73716091262edc638401bb8229144
 [etcd-2.1-benchmark]: etcd-2-1-0-alpha-benchmarks.md
-[hack-benchmark]: /hack/benchmark/
+[hack-benchmark]: ../../hack/benchmark/
```
```diff
@@ -39,4 +39,4 @@ The performance is nearly the same as the one with empty server handler.
 The performance with empty server handler is not affected by one put. So the
 performance downgrade should be caused by storage package.
 
-[etcd-v3-benchmark]: /tools/benchmark/
+[etcd-v3-benchmark]: ../../tools/benchmark/
```
```diff
@@ -26,4 +26,4 @@ etcd uses the [capnslog][capnslog] library for logging application output catego
 * Send a normal message to a remote peer
 * Write a log entry to disk
 
-[capnslog]: [https://github.com/coreos/pkg/tree/master/capnslog]
+[capnslog]: https://github.com/coreos/pkg/tree/master/capnslog
```
```diff
@@ -475,5 +475,5 @@ To setup an etcd cluster with proxies of v2 API, please read the the [clustering
 [proxy]: https://github.com/coreos/etcd/blob/release-2.3/Documentation/proxy.md
 [clustering_etcd2]: https://github.com/coreos/etcd/blob/release-2.3/Documentation/clustering.md
 [security-guide]: security.md
-[tls-setup]: /hack/tls-setup
+[tls-setup]: ../../hack/tls-setup
 [gateway]: gateway.md
```
```diff
@@ -286,7 +286,7 @@ Follow the instructions when using these flags.
 [build-cluster]: clustering.md#static
 [reconfig]: runtime-configuration.md
 [discovery]: clustering.md#discovery
-[iana-ports]: https://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=etcd
+[iana-ports]: http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.txt
 [proxy]: ../v2/proxy.md
 [restore]: ../v2/admin_guide.md#restoring-a-backup
 [security]: security.md
```
````diff
@@ -75,3 +75,4 @@ $ ETCDCTL_API=3 ./etcdctl --endpoints=127.0.0.1:2379 get foo
 foo
 bar
 ```
+
````
```diff
@@ -219,6 +219,6 @@ Make sure to sign the certificates with a Subject Name the member's public IP ad
 The certificate needs to be signed for the member's FQDN in its Subject Name, use Subject Alternative Names (short IP SANs) to add the IP address. The `etcd-ca` tool provides `--domain=` option for its `new-cert` command, and openssl can make [it][alt-name] too.
 
 [cfssl]: https://github.com/cloudflare/cfssl
-[tls-setup]: /hack/tls-setup
+[tls-setup]: ../../hack/tls-setup
 [tls-guide]: https://github.com/coreos/docs/blob/master/os/generate-self-signed-certificates.md
 [alt-name]: http://wiki.cacert.org/FAQ/subjectAltName
```
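As an aside (my illustration, not from the changeset): the FQDN-plus-IP-SAN requirement the doc describes can also be met with Go's crypto/x509 directly. A minimal self-signed-certificate sketch, with the host name and IP address as placeholders:

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"math/big"
	"net"
	"os"
	"time"
)

func main() {
	key, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) // errors ignored: sketch only
	tmpl := x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject:      pkix.Name{CommonName: "etcd-member.example.com"}, // FQDN in Subject
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(365 * 24 * time.Hour),
		KeyUsage:     x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
		ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		DNSNames:     []string{"etcd-member.example.com"},
		IPAddresses:  []net.IP{net.ParseIP("10.0.0.10")}, // the IP SAN (placeholder address)
	}
	der, _ := x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, &key.PublicKey, key)
	pem.Encode(os.Stdout, &pem.Block{Type: "CERTIFICATE", Bytes: der})
}
```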
```diff
@@ -60,3 +60,148 @@ Radius Intelligence uses Kubernetes running CoreOS to containerize and scale int
 
 [teamcity]: https://www.jetbrains.com/teamcity/
 [raoofm]:https://github.com/raoofm
+
+## Qiniu Cloud
+
+- *Application*: system configuration for microservices, distributed locks
+- *Launched*: Jan. 2016
+- *Cluster Size*: 3 members each with several clusters
+- *Order of Data Size*: kilobytes
+- *Operator*: Pandora, chenchao@qiniu.com
+- *Environment*: Baremetal
+- *Backups*: None, all data can be recreated if necessary
+
+## QingCloud
+
+- *Application*: [QingCloud][qingcloud] appcenter cluster for service discovery as [metad][metad] backend.
+- *Launched*: December 2016
+- *Cluster Size*: 1 cluster of 3 members per user.
+- *Order of Data Size*: kilobytes
+- *Operator*: [yunify][yunify]
+- *Environment*: QingCloud IaaS
+- *Backups*: None, all data can be recreated if necessary.
+
+[metad]:https://github.com/yunify/metad
+[yunify]:https://github.com/yunify
+[qingcloud]:https://qingcloud.com/
+
+## Yandex
+
+- *Application*: system configuration for services, service discovery
+- *Launched*: March 2016
+- *Cluster Size*: 3 clusters of 5 members
+- *Order of Data Size*: several gigabytes
+- *Operator*: Yandex; [nekto0n][nekto0n]
+- *Environment*: Bare Metal
+- *Backups*: None
+
+[nekto0n]:https://github.com/nekto0n
+
+## Tencent Games
+
+- *Application*: Meta data and configuration data for service discovery, Kubernetes, etc.
+- *Launched*: Jan. 2015
+- *Cluster Size*: 3 members each with 10s of clusters
+- *Order of Data Size*: 10s of Megabytes
+- *Operator*: Tencent Game Operations Department
+- *Environment*: Baremetal
+- *Backups*: Periodic sync to backup server
+
+In Tencent games, we use Docker and Kubernetes to deploy and run our applications, and use etcd to save meta data for service discovery, Kubernetes, etc.
+
+## Hyper.sh
+
+- *Application*: Kubernetes, distributed locks, etc.
+- *Launched*: April 2016
+- *Cluster Size*: 1 cluster of 3 members
+- *Order of Data Size*: 10s of MB
+- *Operator*: Hyper.sh
+- *Environment*: Baremetal
+- *Backups*: None, all data can be recreated if necessary.
+
+In [hyper.sh][hyper.sh], the container service is backed by [hypernetes][hypernetes], a multi-tenant kubernetes distro. Moreover, we use etcd to coordinate the multiple manage services and store global meta data.
+
+[hypernetes]:https://github.com/hyperhq/hypernetes
+[Hyper.sh]:https://www.hyper.sh
+
+## Meitu
+- *Application*: system configuration for services, service discovery, kubernetes in test environment
+- *Launched*: October 2015
+- *Cluster Size*: 1 cluster of 3 members
+- *Order of Data Size*: megabytes
+- *Operator*: Meitu, hxj@meitu.com, [shafreeck][shafreeck]
+- *Environment*: Bare Metal
+- *Backups*: None, all data can be recreated if necessary.
+
+[shafreeck]:https://github.com/shafreeck
+
+## Grab
+- *Application*: system configuration for services, service discovery
+- *Launched*: June 2016
+- *Cluster Size*: 1 cluster of 7 members
+- *Order of Data Size*: megabytes
+- *Operator*: Grab, [taxitan][taxitan], [reterVision][reterVision]
+- *Environment*: AWS
+- *Backups*: None, all data can be recreated if necessary.
+
+[taxitan]:https://github.com/taxitan
+[reterVision]:https://github.com/reterVision
+
+## DaoCloud.io
+
+- *Application*: container management
+- *Launched*: Sep. 2015
+- *Cluster Size*: 1000+ deployments, each deployment contains a 3 node cluster.
+- *Order of Data Size*: 100s of Megabytes
+- *Operator*: daocloud.io
+- *Environment*: Baremetal and virtual machines
+- *Backups*: None, all data can be recreated if necessary.
+
+In [DaoCloud][DaoCloud], we use Docker and Swarm to deploy and run our applications, and we use etcd to save metadata for service discovery.
+
+[DaoCloud]:https://www.daocloud.io
+
+## Branch.io
+
+- *Application*: Kubernetes
+- *Launched*: April 2016
+- *Cluster Size*: Multiple clusters, multiple sizes
+- *Order of Data Size*: 100s of Megabytes
+- *Operator*: branch.io
+- *Environment*: AWS, Kubernetes
+- *Backups*: EBS volume backups
+
+At [Branch][branch], we use kubernetes heavily as our core microservice platform for staging and production.
+
+[branch]: https://branch.io
+
+## Baidu Waimai
+
+- *Application*: SkyDNS, Kubernetes, UDC, CMDB and other distributed systems
+- *Launched*: April. 2016
+- *Cluster Size*: 3 clusters of 5 members
+- *Order of Data Size*: several gigabytes
+- *Operator*: Baidu Waimai Operations Department
+- *Environment*: CentOS 6.5
+- *Backups*: backup scripts
+
+## Salesforce.com
+
+- *Application*: Kubernetes
+- *Launched*: Jan 2017
+- *Cluster Size*: Multiple clusters of 3 members
+- *Order of Data Size*: 100s of Megabytes
+- *Operator*: Salesforce.com (krmayankk@github)
+- *Environment*: BareMetal
+- *Backups*: None, all data can be recreated
+
+## Hosted Graphite
+
+- *Application*: Service discovery, locking, ephemeral application data
+- *Launched*: January 2017
+- *Cluster Size*: 2 clusters of 7 members
+- *Order of Data Size*: Megabytes
+- *Operator*: Hosted Graphite (sre@hostedgraphite.com)
+- *Environment*: Bare Metal
+- *Backups*: None, all data is considered ephemeral.
```
```diff
@@ -1,6 +1,6 @@
 # Reporting bugs
 
-If any part of the etcd project has bugs or documentation mistakes, please let us know by [opening an issue][issue]. We treat bugs and mistakes very seriously and believe no issue is too small. Before creating a bug report, please check that an issue reporting the same problem does not already exist.
+If any part of the etcd project has bugs or documentation mistakes, please let us know by [opening an issue][etcd-issue]. We treat bugs and mistakes very seriously and believe no issue is too small. Before creating a bug report, please check that an issue reporting the same problem does not already exist.
 
 To make the bug report accurate and easy to understand, please try to create bug reports that are:
 
```
````diff
@@ -67,13 +67,13 @@ You have successfully started an etcd and written a key to the store.
 
 The [official etcd ports][iana-ports] are 2379 for client requests, and 2380 for peer communication. To maintain compatibility, some etcd configuration and documentation continues to refer to the legacy ports 4001 and 7001, but all new etcd use and discussion should adopt the IANA-assigned ports. The legacy ports 4001 and 7001 will be fully deprecated, and support for their use removed, in future etcd releases.
 
-[iana-ports]: https://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=etcd
+[iana-ports]: http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.txt
 
 ### Running local etcd cluster
 
 First install [goreman](https://github.com/mattn/goreman), which manages Procfile-based applications.
 
-Our [Procfile script](./Procfile) will set up a local example cluster. You can start it with:
+Our [Procfile script](../../V2Procfile) will set up a local example cluster. You can start it with:
 
 ```sh
 goreman start
@@ -162,4 +162,4 @@ Currently only the amd64 architecture is officially supported by `etcd`.
 
 ### License
 
-etcd is under the Apache 2.0 license. See the [LICENSE](LICENSE) file for details.
+etcd is under the Apache 2.0 license. See the [LICENSE](../../LICENSE) file for details.
````
```diff
@@ -83,10 +83,10 @@ etcd does not ensure linearizability for watch operations. Users are expected to
 
 etcd ensures linearizability for all other operations by default. Linearizability comes with a cost, however, because linearized requests must go through the Raft consensus process. To obtain lower latencies and higher throughput for read requests, clients can configure a request’s consistency mode to `serializable`, which may access stale data with respect to quorum, but removes the performance penalty of linearized accesses' reliance on live consensus.
 
-[persistent-ds]: [https://en.wikipedia.org/wiki/Persistent_data_structure]
-[btree]: [https://en.wikipedia.org/wiki/B-tree]
-[b+tree]: [https://en.wikipedia.org/wiki/B%2B_tree]
-[seq_consistency]: [https://en.wikipedia.org/wiki/Consistency_model#Sequential_consistency]
-[strict_consistency]: [https://en.wikipedia.org/wiki/Consistency_model#Strict_consistency]
-[serializable_isolation]: [https://en.wikipedia.org/wiki/Isolation_(database_systems)#Serializable]
-[Linearizability]: [#Linearizability]
+[persistent-ds]: https://en.wikipedia.org/wiki/Persistent_data_structure
+[btree]: https://en.wikipedia.org/wiki/B-tree
+[b+tree]: https://en.wikipedia.org/wiki/B%2B_tree
+[seq_consistency]: https://en.wikipedia.org/wiki/Consistency_model#Sequential_consistency
+[strict_consistency]: https://en.wikipedia.org/wiki/Consistency_model#Strict_consistency
+[serializable_isolation]: https://en.wikipedia.org/wiki/Isolation_(database_systems)#Serializable
+[Linearizability]: #linearizability
```
```diff
@@ -56,6 +56,7 @@ Proxy mode in 2.0 will provide similar functionality, and with improved control
 ## Discovery Service
 
 A size key needs to be provided inside a [discovery token][discoverytoken].
+[discoverytoken]: clustering.md#custom-etcd-discovery-service
 
 ## HTTP Admin API
 
```
```diff
@@ -49,4 +49,4 @@ Bootstrap another machine and use the [boom HTTP benchmark tool][boom] to send r
 | 256 | 256 | all servers | 3061 | 119.3 |
 
 [boom]: https://github.com/rakyll/boom
-[hack-benchmark]: /hack/benchmark/
+[hack-benchmark]: ../../../hack/benchmark/
```
```diff
@@ -24,7 +24,7 @@ Go OS/Arch: linux/amd64
 
 ## Testing
 
-Bootstrap another machine, outside of the etcd cluster, and run the [`boom` HTTP benchmark tool](https://github.com/rakyll/boom) with a connection reuse patch to send requests to each etcd cluster member. See the [benchmark instructions](../../hack/benchmark/) for the patch and the steps to reproduce our procedures.
+Bootstrap another machine, outside of the etcd cluster, and run the [`boom` HTTP benchmark tool][boom] with a connection reuse patch to send requests to each etcd cluster member. See the [benchmark instructions][hack] for the patch and the steps to reproduce our procedures.
 
 The performance is calulated through results of 100 benchmark rounds.
 
@@ -67,3 +67,6 @@ The performance is calulated through results of 100 benchmark rounds.
 - Write QPS to cluster leaders seems to be increased by a small margin. This is because the main loop and entry apply loops were decoupled in the etcd raft logic, eliminating several blocks between them.
 
 - Write QPS to all members seems to be increased by a significant margin, because followers now receive the latest commit index sooner, and commit proposals more quickly.
+
+[boom]: https://github.com/rakyll/boom
+[hack]: ../../../hack/benchmark/
```
```diff
@@ -69,4 +69,4 @@ Bootstrap another machine and use the [boom HTTP benchmark tool][boom] to send r
 [boom]: https://github.com/rakyll/boom
 [c7146bd5]: https://github.com/coreos/etcd/commits/c7146bd5f2c73716091262edc638401bb8229144
 [etcd-2.1-benchmark]: etcd-2-1-0-alpha-benchmarks.md
-[hack-benchmark]: /hack/benchmark/
+[hack-benchmark]: ../../../hack/benchmark/
```
```diff
@@ -39,4 +39,4 @@ The performance is nearly the same as the one with empty server handler.
 The performance with empty server handler is not affected by one put. So the
 performance downgrade should be caused by storage package.
 
-[etcd-v3-benchmark]: /tools/benchmark/
+[etcd-v3-benchmark]: ../../../tools/benchmark/
```
```diff
@@ -423,7 +423,7 @@ To make understanding this feature easier, we changed the naming of some flags,
 |-peers |none |Deprecated. The --initial-cluster flag provides a similar concept with different semantics. Please read this guide on cluster startup.|
 |-peers-file |none |Deprecated. The --initial-cluster flag provides a similar concept with different semantics. Please read this guide on cluster startup.|
 
-[client]: /client
+[client]: ../../client
 [client-discoverer]: https://godoc.org/github.com/coreos/etcd/client#Discoverer
 [conf-adv-client]: configuration.md#-advertise-client-urls
 [conf-listen-client]: configuration.md#-listen-client-urls
```
```diff
@@ -272,7 +272,7 @@ Follow the instructions when using these flags.
 [build-cluster]: clustering.md#static
 [reconfig]: runtime-configuration.md
 [discovery]: clustering.md#discovery
-[iana-ports]: https://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=etcd
+[iana-ports]: http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.txt
 [proxy]: proxy.md
 [reconfig]: runtime-configuration.md
 [restore]: admin_guide.md#restoring-a-backup
```
```diff
@@ -112,7 +112,6 @@
 - [mattn/etcdenv](https://github.com/mattn/etcdenv) - "env" shebang with etcd integration
 - [kelseyhightower/confd](https://github.com/kelseyhightower/confd) - Manage local app config files using templates and data from etcd
 - [configdb](https://git.autistici.org/ai/configdb/tree/master) - A REST relational abstraction on top of arbitrary database backends, aimed at storing configs and inventories.
-- [scrz](https://github.com/scrz/scrz) - Container manager, stores configuration in etcd.
 - [fleet](https://github.com/coreos/fleet) - Distributed init system
 - [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes) - Container cluster manager introduced by Google.
 - [mailgun/vulcand](https://github.com/mailgun/vulcand) - HTTP proxy that uses etcd as a configuration backend.
```
```diff
@@ -1,6 +1,6 @@
 # Reporting Bugs
 
-If you find bugs or documentation mistakes in the etcd project, please let us know by [opening an issue][issue]. We treat bugs and mistakes very seriously and believe no issue is too small. Before creating a bug report, please check that an issue reporting the same problem does not already exist.
+If you find bugs or documentation mistakes in the etcd project, please let us know by [opening an issue][etcd-issue]. We treat bugs and mistakes very seriously and believe no issue is too small. Before creating a bug report, please check that an issue reporting the same problem does not already exist.
 
 To make your bug report accurate and easy to understand, please try to create bug reports that are:
 
```
|
@@ -207,5 +207,5 @@ WatchResponse {
|
||||
|
||||
```
|
||||
|
||||
[api-protobuf]: https://github.com/coreos/etcd/blob/master/etcdserver/etcdserverpb/rpc.proto
|
||||
[kv-protobuf]: https://github.com/coreos/etcd/blob/master/storage/storagepb/kv.proto
|
||||
[api-protobuf]: https://github.com/coreos/etcd/blob/release-2.3/etcdserver/etcdserverpb/rpc.proto
|
||||
[kv-protobuf]: https://github.com/coreos/etcd/blob/release-2.3/storage/storagepb/kv.proto
|
||||
|
```diff
@@ -188,6 +188,6 @@ Make sure that you sign your certificates with a Subject Name your member's publ
 If you need your certificate to be signed for your member's FQDN in its Subject Name then you could use Subject Alternative Names (short IP SANs) to add your IP address. The `etcd-ca` tool provides `--domain=` option for its `new-cert` command, and openssl can make [it][alt-name] too.
 
 [cfssl]: https://github.com/cloudflare/cfssl
-[tls-setup]: /hack/tls-setup
+[tls-setup]: ../../hack/tls-setup
 [tls-guide]: https://github.com/coreos/docs/blob/master/os/generate-self-signed-certificates.md
 [alt-name]: http://wiki.cacert.org/FAQ/subjectAltName
```
|
@@ -78,7 +78,7 @@ That's it! etcd is now running and serving client requests. For more
|
||||
|
||||
The [official etcd ports][iana-ports] are 2379 for client requests, and 2380 for peer communication.
|
||||
|
||||
[iana-ports]: https://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=etcd
|
||||
[iana-ports]: http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.txt
|
||||
|
||||
### Running a local etcd cluster
|
||||
|
||||
```diff
@@ -136,5 +136,3 @@ See [reporting bugs](Documentation/reporting_bugs.md) for details about reportin
 ### License
 
 etcd is under the Apache 2.0 license. See the [LICENSE](LICENSE) file for details.
-
-
```
```diff
@@ -21,85 +21,92 @@ import (
 	"crypto/rand"
 	"math/big"
 	"strings"
+	"sync"
 	"time"
 )
 
 const (
 	letters                  = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
 	defaultSimpleTokenLength = 16
 )
 
 // var for testing purposes
 var (
 	simpleTokenTTL           = 5 * time.Minute
 	simpleTokenTTLResolution = 1 * time.Second
 )
 
 type simpleTokenTTLKeeper struct {
-	tokens              map[string]time.Time
-	addSimpleTokenCh    chan string
-	resetSimpleTokenCh  chan string
-	deleteSimpleTokenCh chan string
-	stopCh              chan chan struct{}
-	deleteTokenFunc     func(string)
-}
-
-func NewSimpleTokenTTLKeeper(deletefunc func(string)) *simpleTokenTTLKeeper {
-	stk := &simpleTokenTTLKeeper{
-		tokens:              make(map[string]time.Time),
-		addSimpleTokenCh:    make(chan string, 1),
-		resetSimpleTokenCh:  make(chan string, 1),
-		deleteSimpleTokenCh: make(chan string, 1),
-		stopCh:              make(chan chan struct{}),
-		deleteTokenFunc:     deletefunc,
-	}
-	go stk.run()
-	return stk
+	tokens          map[string]time.Time
+	donec           chan struct{}
+	stopc           chan struct{}
+	deleteTokenFunc func(string)
+	mu              *sync.Mutex
 }
 
 func (tm *simpleTokenTTLKeeper) stop() {
-	waitCh := make(chan struct{})
-	tm.stopCh <- waitCh
-	<-waitCh
-	close(tm.stopCh)
+	select {
+	case tm.stopc <- struct{}{}:
+	case <-tm.donec:
+	}
+	<-tm.donec
 }
 
 func (tm *simpleTokenTTLKeeper) addSimpleToken(token string) {
-	tm.addSimpleTokenCh <- token
+	tm.tokens[token] = time.Now().Add(simpleTokenTTL)
 }
 
 func (tm *simpleTokenTTLKeeper) resetSimpleToken(token string) {
-	tm.resetSimpleTokenCh <- token
+	if _, ok := tm.tokens[token]; ok {
+		tm.tokens[token] = time.Now().Add(simpleTokenTTL)
+	}
 }
 
 func (tm *simpleTokenTTLKeeper) deleteSimpleToken(token string) {
-	tm.deleteSimpleTokenCh <- token
+	delete(tm.tokens, token)
 }
 
 func (tm *simpleTokenTTLKeeper) run() {
 	tokenTicker := time.NewTicker(simpleTokenTTLResolution)
-	defer tokenTicker.Stop()
+	defer func() {
+		tokenTicker.Stop()
+		close(tm.donec)
+	}()
 	for {
 		select {
-		case t := <-tm.addSimpleTokenCh:
-			tm.tokens[t] = time.Now().Add(simpleTokenTTL)
-		case t := <-tm.resetSimpleTokenCh:
-			if _, ok := tm.tokens[t]; ok {
-				tm.tokens[t] = time.Now().Add(simpleTokenTTL)
-			}
-		case t := <-tm.deleteSimpleTokenCh:
-			delete(tm.tokens, t)
 		case <-tokenTicker.C:
 			nowtime := time.Now()
+			tm.mu.Lock()
 			for t, tokenendtime := range tm.tokens {
 				if nowtime.After(tokenendtime) {
 					tm.deleteTokenFunc(t)
 					delete(tm.tokens, t)
 				}
 			}
-		case waitCh := <-tm.stopCh:
-			tm.tokens = make(map[string]time.Time)
-			waitCh <- struct{}{}
+			tm.mu.Unlock()
+		case <-tm.stopc:
+			return
 		}
 	}
 }
 
+func (as *authStore) enable() {
+	delf := func(tk string) {
+		if username, ok := as.simpleTokens[tk]; ok {
+			plog.Infof("deleting token %s for user %s", tk, username)
+			delete(as.simpleTokens, tk)
+		}
+	}
+	as.simpleTokenKeeper = &simpleTokenTTLKeeper{
+		tokens:          make(map[string]time.Time),
+		donec:           make(chan struct{}),
+		stopc:           make(chan struct{}),
+		deleteTokenFunc: delf,
+		mu:              &as.simpleTokensMu,
+	}
+	go as.simpleTokenKeeper.run()
+}
+
 func (as *authStore) GenSimpleToken() (string, error) {
 	ret := make([]byte, defaultSimpleTokenLength)
 
@@ -117,7 +124,6 @@ func (as *authStore) GenSimpleToken() (string, error) {
 
 func (as *authStore) assignSimpleTokenToUser(username, token string) {
 	as.simpleTokensMu.Lock()
-
 	_, ok := as.simpleTokens[token]
 	if ok {
 		plog.Panicf("token %s is alredy used", token)
@@ -129,13 +135,15 @@ func (as *authStore) assignSimpleTokenToUser(username, token string) {
 }
 
 func (as *authStore) invalidateUser(username string) {
+	if as.simpleTokenKeeper == nil {
+		return
+	}
 	as.simpleTokensMu.Lock()
-
+	defer as.simpleTokensMu.Unlock()
 	for token, name := range as.simpleTokens {
 		if strings.Compare(name, username) == 0 {
 			delete(as.simpleTokens, token)
 			as.simpleTokenKeeper.deleteSimpleToken(token)
 		}
 	}
-	as.simpleTokensMu.Unlock()
 }
```
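The hunk above replaces the channel-based TTL keeper with one that mutates the token map directly under the auth store's mutex, so the reaper goroutine and callers can never deadlock on a full channel. A minimal, self-contained sketch of the same pattern (illustrative names, not etcd's):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// ttlKeeper expires entries from a shared map; callers mutate the map
// while holding mu, and the reaper goroutine takes the same lock on
// every tick, so no coordination channels are needed.
type ttlKeeper struct {
	mu      sync.Mutex
	entries map[string]time.Time
	stopc   chan struct{}
	donec   chan struct{}
}

func newTTLKeeper(resolution time.Duration) *ttlKeeper {
	k := &ttlKeeper{
		entries: make(map[string]time.Time),
		stopc:   make(chan struct{}),
		donec:   make(chan struct{}),
	}
	go k.run(resolution)
	return k
}

func (k *ttlKeeper) add(key string, ttl time.Duration) {
	k.mu.Lock()
	k.entries[key] = time.Now().Add(ttl)
	k.mu.Unlock()
}

func (k *ttlKeeper) run(resolution time.Duration) {
	t := time.NewTicker(resolution)
	defer func() { t.Stop(); close(k.donec) }()
	for {
		select {
		case <-t.C:
			k.mu.Lock()
			now := time.Now()
			for key, deadline := range k.entries {
				if now.After(deadline) {
					delete(k.entries, key)
				}
			}
			k.mu.Unlock()
		case <-k.stopc:
			return
		}
	}
}

// stop works whether or not the reaper already exited, mirroring the
// stop/donec handshake in the diff.
func (k *ttlKeeper) stop() {
	select {
	case k.stopc <- struct{}{}:
	case <-k.donec:
	}
	<-k.donec
}

func main() {
	k := newTTLKeeper(50 * time.Millisecond)
	k.add("token", 100*time.Millisecond)
	time.Sleep(300 * time.Millisecond)
	k.mu.Lock()
	fmt.Println(len(k.entries)) // 0: the token expired
	k.mu.Unlock()
	k.stop()
}
```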
```diff
@@ -168,13 +168,13 @@ type authStore struct {
 
 	rangePermCache map[string]*unifiedRangePermissions // username -> unifiedRangePermissions
 
-	simpleTokensMu    sync.RWMutex
-	simpleTokens      map[string]string // token -> username
-	simpleTokenKeeper *simpleTokenTTLKeeper
-
 	revision uint64
 
-	indexWaiter func(uint64) <-chan struct{}
+	// tokenSimple in v3.2+
+	indexWaiter       func(uint64) <-chan struct{}
+	simpleTokenKeeper *simpleTokenTTLKeeper
+	simpleTokensMu    sync.Mutex
+	simpleTokens      map[string]string // token -> username
 }
 
 func newDeleterFunc(as *authStore) func(string) {
@@ -215,8 +215,7 @@ func (as *authStore) AuthEnable() error {
 	tx.UnsafePut(authBucketName, enableFlagKey, authEnabled)
 
 	as.enabled = true
-
-	as.simpleTokenKeeper = NewSimpleTokenTTLKeeper(newDeleterFunc(as))
+	as.enable()
 
 	as.rangePermCache = make(map[string]*unifiedRangePermissions)
 
@@ -244,11 +243,12 @@ func (as *authStore) AuthDisable() {
 	as.enabled = false
 
 	as.simpleTokensMu.Lock()
+	tk := as.simpleTokenKeeper
+	as.simpleTokenKeeper = nil
 	as.simpleTokens = make(map[string]string) // invalidate all tokens
 	as.simpleTokensMu.Unlock()
-	if as.simpleTokenKeeper != nil {
-		as.simpleTokenKeeper.stop()
-		as.simpleTokenKeeper = nil
+	if tk != nil {
+		tk.stop()
 	}
 
 	plog.Noticef("Authentication disabled")
@@ -646,13 +646,14 @@ func (as *authStore) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse,
 }
 
 func (as *authStore) AuthInfoFromToken(token string) (*AuthInfo, bool) {
-	as.simpleTokensMu.RLock()
-	defer as.simpleTokensMu.RUnlock()
-	t, ok := as.simpleTokens[token]
-	if ok {
+	// same as '(t *tokenSimple) info' in v3.2+
+	as.simpleTokensMu.Lock()
+	username, ok := as.simpleTokens[token]
+	if ok && as.simpleTokenKeeper != nil {
 		as.simpleTokenKeeper.resetSimpleToken(token)
 	}
-	return &AuthInfo{Username: t, Revision: as.revision}, ok
+	as.simpleTokensMu.Unlock()
+	return &AuthInfo{Username: username, Revision: as.revision}, ok
 }
 
 type permSlice []*authpb.Permission
@@ -764,6 +765,9 @@ func (as *authStore) IsAdminPermitted(authInfo *AuthInfo) error {
 	if !as.isAuthEnabled() {
 		return nil
 	}
+	if authInfo == nil {
+		return ErrUserEmpty
+	}
 
 	tx := as.be.BatchTx()
 	tx.Lock()
@@ -908,10 +912,12 @@ func NewAuthStore(be backend.Backend, indexWaiter func(uint64) <-chan struct{})
 	}
 
 	if enabled {
-		as.simpleTokenKeeper = NewSimpleTokenTTLKeeper(newDeleterFunc(as))
+		as.enable()
 	}
 
-	as.commitRevision(tx)
+	if as.revision == 0 {
+		as.commitRevision(tx)
+	}
 
 	tx.Unlock()
 	be.ForceCommit()
```
```diff
@@ -34,31 +34,30 @@ func dummyIndexWaiter(index uint64) <-chan struct{} {
 	return ch
 }
 
-func TestUserAdd(t *testing.T) {
+// TestNewAuthStoreRevision ensures newly auth store
+// keeps the old revision when there are no changes.
+func TestNewAuthStoreRevision(t *testing.T) {
 	b, tPath := backend.NewDefaultTmpBackend()
-	defer func() {
-		b.Close()
-		os.Remove(tPath)
-	}()
+	defer os.Remove(tPath)
 
 	as := NewAuthStore(b, dummyIndexWaiter)
-	ua := &pb.AuthUserAddRequest{Name: "foo"}
-	_, err := as.UserAdd(ua) // add a non-existing user
+	err := enableAuthAndCreateRoot(as)
 	if err != nil {
 		t.Fatal(err)
 	}
-	_, err = as.UserAdd(ua) // add an existing user
-	if err == nil {
-		t.Fatalf("expected %v, got %v", ErrUserAlreadyExist, err)
-	}
-	if err != ErrUserAlreadyExist {
-		t.Fatalf("expected %v, got %v", ErrUserAlreadyExist, err)
-	}
+	old := as.Revision()
+	b.Close()
+	as.Close()
 
-	ua = &pb.AuthUserAddRequest{Name: ""}
-	_, err = as.UserAdd(ua) // add a user with empty name
-	if err != ErrUserEmpty {
-		t.Fatal(err)
+	// no changes to commit
+	b2 := backend.NewDefaultBackend(tPath)
+	as = NewAuthStore(b2, dummyIndexWaiter)
+	new := as.Revision()
+	b2.Close()
+	as.Close()
+
+	if old != new {
+		t.Fatalf("expected revision %d, got %d", old, new)
 	}
 }
 
```
```diff
@@ -34,7 +34,7 @@ import (
 func TestV2NoRetryEOF(t *testing.T) {
 	defer testutil.AfterTest(t)
 	// generate an EOF response; specify address so appears first in sorted ep list
-	lEOF := integration.NewListenerWithAddr(t, fmt.Sprintf("eof:123.%d.sock", os.Getpid()))
+	lEOF := integration.NewListenerWithAddr(t, fmt.Sprintf("127.0.0.1:%05d", os.Getpid()))
 	defer lEOF.Close()
 	tries := uint32(0)
 	go func() {
@@ -65,8 +65,7 @@ func TestV2NoRetryEOF(t *testing.T) {
 // TestV2NoRetryNoLeader tests destructive api calls won't retry if given an error code.
 func TestV2NoRetryNoLeader(t *testing.T) {
 	defer testutil.AfterTest(t)
-
-	lHttp := integration.NewListenerWithAddr(t, fmt.Sprintf("errHttp:123.%d.sock", os.Getpid()))
+	lHttp := integration.NewListenerWithAddr(t, fmt.Sprintf("127.0.0.1:%05d", os.Getpid()))
 	eh := &errHandler{errCode: http.StatusServiceUnavailable}
 	srv := httptest.NewUnstartedServer(eh)
 	defer lHttp.Close()
```
```diff
@@ -282,8 +282,16 @@ func (c *Client) dial(endpoint string, dopts ...grpc.DialOption) (*grpc.ClientCo
 			tokenMu: &sync.RWMutex{},
 		}
 
-		err := c.getToken(context.TODO())
-		if err != nil {
+		ctx := c.ctx
+		if c.cfg.DialTimeout > 0 {
+			cctx, cancel := context.WithTimeout(ctx, c.cfg.DialTimeout)
+			defer cancel()
+			ctx = cctx
+		}
+		if err := c.getToken(ctx); err != nil {
+			if err == ctx.Err() && ctx.Err() != c.ctx.Err() {
+				err = grpc.ErrClientConnTimeout
+			}
 			return nil, err
 		}
 
@@ -335,6 +343,8 @@ func newClient(cfg *Config) (*Client, error) {
 	client.balancer = newSimpleBalancer(cfg.Endpoints)
 	conn, err := client.dial(cfg.Endpoints[0], grpc.WithBalancer(client.balancer))
 	if err != nil {
+		client.cancel()
+		client.balancer.Close()
 		return nil, err
 	}
 	client.conn = conn
@@ -353,6 +363,7 @@ func newClient(cfg *Config) (*Client, error) {
 	}
 	if !hasConn {
 		client.cancel()
+		client.balancer.Close()
 		conn.Close()
 		return nil, grpc.ErrClientConnTimeout
 	}
```
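The dial fix above bounds the auth-token fetch with the configured dial timeout by deriving a child context, then distinguishes "the child timed out" from "the parent was cancelled". The pattern in isolation (a sketch; `doSomething` is a placeholder):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// boundedCall derives a child context when a timeout is configured, so a
// parent cancellation and a timeout can be told apart afterwards.
func boundedCall(parent context.Context, timeout time.Duration,
	doSomething func(context.Context) error) error {
	ctx := parent
	if timeout > 0 {
		cctx, cancel := context.WithTimeout(ctx, timeout)
		defer cancel()
		ctx = cctx
	}
	if err := doSomething(ctx); err != nil {
		// The child expired but the parent did not: report a timeout.
		if err == ctx.Err() && ctx.Err() != parent.Err() {
			return errors.New("dial timed out")
		}
		return err
	}
	return nil
}

func main() {
	slow := func(ctx context.Context) error {
		select {
		case <-time.After(time.Second):
			return nil
		case <-ctx.Done():
			return ctx.Err()
		}
	}
	// Prints "dial timed out": the 100ms child deadline fires first.
	fmt.Println(boundedCall(context.Background(), 100*time.Millisecond, slow))
}
```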
```diff
@@ -70,33 +70,45 @@ func TestDialCancel(t *testing.T) {
 func TestDialTimeout(t *testing.T) {
 	defer testutil.AfterTest(t)
 
-	donec := make(chan error)
-	go func() {
-		// without timeout, grpc keeps redialing if connection refused
-		cfg := Config{
-			Endpoints:   []string{"localhost:12345"},
-			DialTimeout: 2 * time.Second}
-		c, err := New(cfg)
-		if c != nil || err == nil {
-			t.Errorf("new client should fail")
-		}
-		donec <- err
-	}()
-
-	time.Sleep(10 * time.Millisecond)
-
-	select {
-	case err := <-donec:
-		t.Errorf("dial didn't wait (%v)", err)
-	default:
+	testCfgs := []Config{
+		{
+			Endpoints:   []string{"http://254.0.0.1:12345"},
+			DialTimeout: 2 * time.Second,
+		},
+		{
+			Endpoints:   []string{"http://254.0.0.1:12345"},
+			DialTimeout: time.Second,
+			Username:    "abc",
+			Password:    "def",
+		},
 	}
 
-	select {
-	case <-time.After(5 * time.Second):
-		t.Errorf("failed to timeout dial on time")
-	case err := <-donec:
-		if err != grpc.ErrClientConnTimeout {
-			t.Errorf("unexpected error %v, want %v", err, grpc.ErrClientConnTimeout)
+	for i, cfg := range testCfgs {
+		donec := make(chan error)
+		go func() {
+			// without timeout, dial continues forever on ipv4 blackhole
+			c, err := New(cfg)
+			if c != nil || err == nil {
+				t.Errorf("#%d: new client should fail", i)
+			}
+			donec <- err
+		}()
+
+		time.Sleep(10 * time.Millisecond)
+
+		select {
+		case err := <-donec:
+			t.Errorf("#%d: dial didn't wait (%v)", i, err)
+		default:
+		}
+
+		select {
+		case <-time.After(5 * time.Second):
+			t.Errorf("#%d: failed to timeout dial on time", i)
+		case err := <-donec:
+			if err != grpc.ErrClientConnTimeout {
+				t.Errorf("#%d: unexpected error %v, want %v", i, err, grpc.ErrClientConnTimeout)
+			}
 		}
 	}
 }
```
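For reference (my illustration, not from the changeset): 254.0.0.1 lies in an unassigned IPv4 range, so connections to it typically hang rather than being refused, which makes it a handy "blackhole" for exercising timeouts:

```go
package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	// Dialing an IPv4 blackhole neither succeeds nor is refused;
	// only the timeout fires.
	start := time.Now()
	_, err := net.DialTimeout("tcp", "254.0.0.1:12345", 2*time.Second)
	fmt.Printf("after %v: %v\n", time.Since(start).Round(time.Second), err)
}
```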
```diff
@@ -156,6 +156,30 @@ func TestLeaseKeepAlive(t *testing.T) {
 	}
 }
 
+func TestLeaseKeepAliveOneSecond(t *testing.T) {
+	defer testutil.AfterTest(t)
+
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	defer clus.Terminate(t)
+
+	cli := clus.Client(0)
+
+	resp, err := cli.Grant(context.Background(), 1)
+	if err != nil {
+		t.Errorf("failed to create lease %v", err)
+	}
+	rc, kerr := cli.KeepAlive(context.Background(), resp.ID)
+	if kerr != nil {
+		t.Errorf("failed to keepalive lease %v", kerr)
+	}
+
+	for i := 0; i < 3; i++ {
+		if _, ok := <-rc; !ok {
+			t.Errorf("chan is closed, want not closed")
+		}
+	}
+}
+
 // TODO: add a client that can connect to all the members of cluster via unix sock.
 // TODO: test handle more complicated failures.
 func TestLeaseKeepAliveHandleFailure(t *testing.T) {
```
```diff
@@ -347,7 +347,57 @@ func putAndWatch(t *testing.T, wctx *watchctx, key, val string) {
 	}
 }
 
+func TestWatchResumeInitRev(t *testing.T) {
+	defer testutil.AfterTest(t)
+	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
+	defer clus.Terminate(t)
+
+	cli := clus.Client(0)
+	if _, err := cli.Put(context.TODO(), "b", "2"); err != nil {
+		t.Fatal(err)
+	}
+	if _, err := cli.Put(context.TODO(), "a", "3"); err != nil {
+		t.Fatal(err)
+	}
+	// if resume is broken, it'll pick up this key first instead of a=3
+	if _, err := cli.Put(context.TODO(), "a", "4"); err != nil {
+		t.Fatal(err)
+	}
+
+	wch := clus.Client(0).Watch(context.Background(), "a", clientv3.WithRev(1), clientv3.WithCreatedNotify())
+	if resp, ok := <-wch; !ok || resp.Header.Revision != 4 {
+		t.Fatalf("got (%v, %v), expected create notification rev=4", resp, ok)
+	}
+	// pause wch
+	clus.Members[0].DropConnections()
+	clus.Members[0].PauseConnections()
+
+	select {
+	case resp, ok := <-wch:
+		t.Skipf("wch should block, got (%+v, %v); drop not fast enough", resp, ok)
+	case <-time.After(100 * time.Millisecond):
+	}
+
+	// resume wch
+	clus.Members[0].UnpauseConnections()
+
+	select {
+	case resp, ok := <-wch:
+		if !ok {
+			t.Fatal("unexpected watch close")
+		}
+		if len(resp.Events) == 0 {
+			t.Fatal("expected event on watch")
+		}
+		if string(resp.Events[0].Kv.Value) != "3" {
+			t.Fatalf("expected value=3, got event %+v", resp.Events[0])
+		}
+	case <-time.After(5 * time.Second):
+		t.Fatal("watch timed out")
+	}
+}
+
-// TestWatchResumeComapcted checks that the watcher gracefully closes in case
+// TestWatchResumeCompacted checks that the watcher gracefully closes in case
 // that it tries to resume to a revision that's been compacted out of the store.
 // Since the watcher's server restarts with stale data, the watcher will receive
 // either a compaction error or all keys by staying in sync before the compaction
```
```diff
@@ -416,7 +416,7 @@ func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) {
 	}
 
 	// send update to all channels
-	nextKeepAlive := time.Now().Add(time.Duration(karesp.TTL+2) / 3 * time.Second)
+	nextKeepAlive := time.Now().Add((time.Duration(karesp.TTL) * time.Second) / 3.0)
 	ka.deadline = time.Now().Add(time.Duration(karesp.TTL) * time.Second)
 	for _, ch := range ka.chs {
 		select {
```
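The old expression truncated `time.Duration(TTL+2)/3` as a raw nanosecond count before scaling by `time.Second`, so a 1-second lease had its next keepalive scheduled a full second out, exactly at the deadline. A quick check of both formulas (a sketch):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	for _, ttl := range []int64{1, 5, 10} {
		buggy := time.Duration(ttl+2) / 3 * time.Second   // (ttl+2)/3 truncates first
		fixed := (time.Duration(ttl) * time.Second) / 3.0 // one third of the TTL
		fmt.Printf("TTL=%2ds  buggy=%v  fixed=%v\n", ttl, buggy, fixed)
	}
	// TTL= 1s  buggy=1s  fixed=333.333333ms  <- buggy schedules at the deadline
	// TTL= 5s  buggy=2s  fixed=1.666666666s
	// TTL=10s  buggy=4s  fixed=3.333333333s
}
```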
```diff
@@ -132,6 +132,8 @@ type watchGrpcStream struct {
 	errc chan error
 	// closingc gets the watcherStream of closing watchers
 	closingc chan *watcherStream
+	// wg is Done when all substream goroutines have exited
+	wg sync.WaitGroup
 
 	// resumec closes to signal that all substreams should begin resuming
 	resumec chan struct{}
@@ -406,7 +408,7 @@ func (w *watchGrpcStream) run() {
 		for range closing {
 			w.closeSubstream(<-w.closingc)
 		}
-
+		w.wg.Wait()
 		w.owner.closeStream(w)
 	}()
 
@@ -431,6 +433,7 @@ func (w *watchGrpcStream) run() {
 			}
 
 			ws.donec = make(chan struct{})
+			w.wg.Add(1)
 			go w.serveSubstream(ws, w.resumec)
 
 			// queue up for watcher creation/resume
@@ -576,6 +579,7 @@ func (w *watchGrpcStream) serveSubstream(ws *watcherStream, resumec chan struct{
 		if !resuming {
 			w.closingc <- ws
 		}
+		w.wg.Done()
 	}()
 
 	emptyWr := &WatchResponse{}
@@ -612,10 +616,24 @@ func (w *watchGrpcStream) serveSubstream(ws *watcherStream, resumec chan struct{
 				if ws.initReq.createdNotify {
 					ws.outc <- *wr
 				}
+				// once the watch channel is returned, a current revision
+				// watch must resume at the store revision. This is necessary
+				// for the following case to work as expected:
+				//	wch := m1.Watch("a")
+				//	m2.Put("a", "b")
+				//	<-wch
+				// If the revision is only bound on the first observed event,
+				// if wch is disconnected before the Put is issued, then reconnects
+				// after it is committed, it'll miss the Put.
+				if ws.initReq.rev == 0 {
+					nextRev = wr.Header.Revision
+				}
 			}
+		} else {
+			// current progress of watch; <= store revision
+			nextRev = wr.Header.Revision
 		}
 
-		nextRev = wr.Header.Revision
 		if len(wr.Events) > 0 {
 			nextRev = wr.Events[len(wr.Events)-1].Kv.ModRevision + 1
 		}
@@ -674,6 +692,7 @@ func (w *watchGrpcStream) newWatchClient() (pb.Watch_WatchClient, error) {
 			continue
 		}
 		ws.donec = make(chan struct{})
+		w.wg.Add(1)
 		go w.serveSubstream(ws, w.resumec)
 	}
 
@@ -694,6 +713,10 @@ func (w *watchGrpcStream) waitCancelSubstreams(stopc <-chan struct{}) <-chan str
 		go func(ws *watcherStream) {
 			defer wg.Done()
 			if ws.closing {
+				if ws.initReq.ctx.Err() != nil && ws.outc != nil {
+					close(ws.outc)
+					ws.outc = nil
+				}
 				return
 			}
 			select {
```
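The resume rule in the hunk above can be stated compactly: a watch created at "current revision" binds to the store revision from the creation notify, progress notifications advance it to the header revision, and events advance it past the last observed ModRevision. A distilled sketch (illustrative types, not etcd's):

```go
package main

import "fmt"

// A trimmed-down model of the watch-response fields the resume logic uses.
type event struct{ modRevision int64 }
type response struct {
	headerRevision int64
	created        bool
	events         []event
}

// nextResumeRev mirrors the hunk's rule for choosing the revision a
// disconnected watcher should resume from.
func nextResumeRev(nextRev, initRev int64, r response) int64 {
	if r.created {
		// current-revision watch: bind to the store revision immediately,
		// so puts committed while disconnected are not missed
		if initRev == 0 {
			nextRev = r.headerRevision
		}
	} else {
		// progress of the watch; <= store revision
		nextRev = r.headerRevision
	}
	if len(r.events) > 0 {
		nextRev = r.events[len(r.events)-1].modRevision + 1
	}
	return nextRev
}

func main() {
	// create notify at store revision 4 for a current-revision watch
	rev := nextResumeRev(0, 0, response{headerRevision: 4, created: true})
	fmt.Println(rev) // 4
	// an event at revision 5 moves the resume point to 6
	rev = nextResumeRev(rev, 0, response{headerRevision: 5, events: []event{{5}}})
	fmt.Println(rev) // 6
}
```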
cmd/vendor/github.com/boltdb/bolt/bolt_amd64.go (generated, vendored) — file deleted:

```diff
@@ -1,7 +0,0 @@
-package bolt
-
-// maxMapSize represents the largest mmap size supported by Bolt.
-const maxMapSize = 0xFFFFFFFFFFFF // 256TB
-
-// maxAllocSize is the size used when creating array pointers.
-const maxAllocSize = 0x7FFFFFFF
```
```diff
@@ -5,3 +5,6 @@ const maxMapSize = 0x7FFFFFFF // 2GB
 
 // maxAllocSize is the size used when creating array pointers.
 const maxAllocSize = 0xFFFFFFF
+
+// Are unaligned load/stores broken on this arch?
+var brokenUnaligned = false
```
cmd/vendor/github.com/coreos/bbolt/bolt_amd64.go (generated, vendored) — new file (@@ -0,0 +1,10 @@):

```go
package bolt

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF

// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false
```
cmd/vendor/github.com/coreos/bbolt/bolt_arm.go (generated, vendored) — new file (@@ -0,0 +1,28 @@):

```go
package bolt

import "unsafe"

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x7FFFFFFF // 2GB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0xFFFFFFF

// Are unaligned load/stores broken on this arch?
var brokenUnaligned bool

func init() {
	// Simple check to see whether this arch handles unaligned load/stores
	// correctly.

	// ARM9 and older devices require load/stores to be from/to aligned
	// addresses. If not, the lower 2 bits are cleared and that address is
	// read in a jumbled up order.

	// See http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka15414.html

	raw := [6]byte{0xfe, 0xef, 0x11, 0x22, 0x22, 0x11}
	val := *(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(&raw)) + 2))

	brokenUnaligned = val != 0x11222211
}
```
```diff
@@ -7,3 +7,6 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB
 
 // maxAllocSize is the size used when creating array pointers.
 const maxAllocSize = 0x7FFFFFFF
+
+// Are unaligned load/stores broken on this arch?
+var brokenUnaligned = false
```
cmd/vendor/github.com/coreos/bbolt/bolt_mips64x.go (generated, vendored) — new file (@@ -0,0 +1,12 @@):

```go
// +build mips64 mips64le

package bolt

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x8000000000 // 512GB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF

// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false
```
```diff
@@ -1,7 +1,12 @@
+// +build mips mipsle
+
 package bolt
 
 // maxMapSize represents the largest mmap size supported by Bolt.
-const maxMapSize = 0x7FFFFFFF // 2GB
+const maxMapSize = 0x40000000 // 1GB
 
 // maxAllocSize is the size used when creating array pointers.
 const maxAllocSize = 0xFFFFFFF
+
+// Are unaligned load/stores broken on this arch?
+var brokenUnaligned = false
```
Three further 64-bit architecture files gain the same flag; the identical hunk appears three times:

```diff
@@ -7,3 +7,6 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB
 
 // maxAllocSize is the size used when creating array pointers.
 const maxAllocSize = 0x7FFFFFFF
+
+// Are unaligned load/stores broken on this arch?
+var brokenUnaligned = false
```
```diff
@@ -13,29 +13,32 @@ import (
 // flock acquires an advisory lock on a file descriptor.
 func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
 	var t time.Time
+	if timeout != 0 {
+		t = time.Now()
+	}
+	fd := db.file.Fd()
+	flag := syscall.LOCK_NB
+	if exclusive {
+		flag |= syscall.LOCK_EX
+	} else {
+		flag |= syscall.LOCK_SH
+	}
 	for {
-		// If we're beyond our timeout then return an error.
-		// This can only occur after we've attempted a flock once.
-		if t.IsZero() {
-			t = time.Now()
-		} else if timeout > 0 && time.Since(t) > timeout {
-			return ErrTimeout
-		}
-		flag := syscall.LOCK_SH
-		if exclusive {
-			flag = syscall.LOCK_EX
-		}
-
-		// Otherwise attempt to obtain an exclusive lock.
-		err := syscall.Flock(int(db.file.Fd()), flag|syscall.LOCK_NB)
+		// Attempt to obtain an exclusive lock.
+		err := syscall.Flock(int(fd), flag)
 		if err == nil {
 			return nil
 		} else if err != syscall.EWOULDBLOCK {
 			return err
 		}
 
+		// If we timed out then return an error.
+		if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
+			return ErrTimeout
+		}
+
 		// Wait for a bit and try again.
-		time.Sleep(50 * time.Millisecond)
+		time.Sleep(flockRetryTimeout)
 	}
 }
```
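The reworked lock loop computes the flags once and checks the deadline only after a failed attempt, so the first try is never charged against the timeout. The shape of the loop, extracted into a generic retry helper (a sketch, not bbolt's API):

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

var errTimeout = errors.New("timeout")

const retryInterval = 50 * time.Millisecond

// tryUntil retries attempt() until it succeeds, fails hard, or the
// deadline would pass before the next retry -- the same shape as the
// reworked flock loop above.
func tryUntil(timeout time.Duration, attempt func() (done bool, err error)) error {
	var t time.Time
	if timeout != 0 {
		t = time.Now()
	}
	for {
		done, err := attempt()
		if done || err != nil {
			return err
		}
		// Out of time: report a timeout instead of sleeping again.
		if timeout != 0 && time.Since(t) > timeout-retryInterval {
			return errTimeout
		}
		time.Sleep(retryInterval)
	}
}

func main() {
	n := 0
	err := tryUntil(time.Second, func() (bool, error) {
		n++
		return n >= 3, nil // succeed on the third attempt
	})
	fmt.Println(n, err) // 3 <nil>
}
```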
```diff
@@ -13,34 +13,33 @@ import (
 // flock acquires an advisory lock on a file descriptor.
 func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
 	var t time.Time
+	if timeout != 0 {
+		t = time.Now()
+	}
+	fd := db.file.Fd()
+	var lockType int16
+	if exclusive {
+		lockType = syscall.F_WRLCK
+	} else {
+		lockType = syscall.F_RDLCK
+	}
 	for {
-		// If we're beyond our timeout then return an error.
-		// This can only occur after we've attempted a flock once.
-		if t.IsZero() {
-			t = time.Now()
-		} else if timeout > 0 && time.Since(t) > timeout {
-			return ErrTimeout
-		}
-		var lock syscall.Flock_t
-		lock.Start = 0
-		lock.Len = 0
-		lock.Pid = 0
-		lock.Whence = 0
-		lock.Pid = 0
-		if exclusive {
-			lock.Type = syscall.F_WRLCK
-		} else {
-			lock.Type = syscall.F_RDLCK
-		}
-		err := syscall.FcntlFlock(db.file.Fd(), syscall.F_SETLK, &lock)
+		// Attempt to obtain an exclusive lock.
+		lock := syscall.Flock_t{Type: lockType}
+		err := syscall.FcntlFlock(fd, syscall.F_SETLK, &lock)
 		if err == nil {
 			return nil
 		} else if err != syscall.EAGAIN {
 			return err
 		}
 
+		// If we timed out then return an error.
+		if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
+			return ErrTimeout
+		}
+
 		// Wait for a bit and try again.
-		time.Sleep(50 * time.Millisecond)
+		time.Sleep(flockRetryTimeout)
 	}
 }
```
```diff
@@ -59,29 +59,30 @@ func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) erro
 	db.lockfile = f
 
 	var t time.Time
+	if timeout != 0 {
+		t = time.Now()
+	}
+	fd := f.Fd()
+	var flag uint32 = flagLockFailImmediately
+	if exclusive {
+		flag |= flagLockExclusive
+	}
 	for {
-		// If we're beyond our timeout then return an error.
-		// This can only occur after we've attempted a flock once.
-		if t.IsZero() {
-			t = time.Now()
-		} else if timeout > 0 && time.Since(t) > timeout {
-			return ErrTimeout
-		}
-
-		var flag uint32 = flagLockFailImmediately
-		if exclusive {
-			flag |= flagLockExclusive
-		}
-
-		err := lockFileEx(syscall.Handle(db.lockfile.Fd()), flag, 0, 1, 0, &syscall.Overlapped{})
+		// Attempt to obtain an exclusive lock.
+		err := lockFileEx(syscall.Handle(fd), flag, 0, 1, 0, &syscall.Overlapped{})
 		if err == nil {
 			return nil
 		} else if err != errLockViolation {
 			return err
 		}
 
+		// If we timed out then return an error.
+		if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
+			return ErrTimeout
+		}
+
 		// Wait for a bit and try again.
-		time.Sleep(50 * time.Millisecond)
+		time.Sleep(flockRetryTimeout)
 	}
 }
 
@@ -89,7 +90,7 @@ func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) erro
 func funlock(db *DB) error {
 	err := unlockFileEx(syscall.Handle(db.lockfile.Fd()), 0, 1, 0, &syscall.Overlapped{})
 	db.lockfile.Close()
-	os.Remove(db.path+lockExt)
+	os.Remove(db.path + lockExt)
 	return err
 }
```
@@ -14,13 +14,6 @@ const (
	MaxValueSize = (1 << 31) - 2
)

const (
	maxUint = ^uint(0)
	minUint = 0
	maxInt  = int(^uint(0) >> 1)
	minInt  = -maxInt - 1
)

const bucketHeaderSize = int(unsafe.Sizeof(bucket{}))

const (
@@ -130,9 +123,17 @@ func (b *Bucket) Bucket(name []byte) *Bucket {
func (b *Bucket) openBucket(value []byte) *Bucket {
	var child = newBucket(b.tx)

	// If unaligned load/stores are broken on this arch and value is
	// unaligned simply clone to an aligned byte array.
	unaligned := brokenUnaligned && uintptr(unsafe.Pointer(&value[0]))&3 != 0

	if unaligned {
		value = cloneBytes(value)
	}

	// If this is a writable transaction then we need to copy the bucket entry.
	// Read-only transactions can point directly at the mmap entry.
	if b.tx.writable {
	if b.tx.writable && !unaligned {
		child.bucket = &bucket{}
		*child.bucket = *(*bucket)(unsafe.Pointer(&value[0]))
	} else {
@@ -167,9 +168,8 @@ func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) {
	if bytes.Equal(key, k) {
		if (flags & bucketLeafFlag) != 0 {
			return nil, ErrBucketExists
		} else {
			return nil, ErrIncompatibleValue
		}
		return nil, ErrIncompatibleValue
	}

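The unaligned guard matters on architectures where casting a misaligned byte pointer to a struct pointer faults. The test itself is a one-liner; a small illustration, with brokenUnaligned assumed to be a per-arch build-tag constant as the diff suggests:

package aligndemo

import "unsafe"

// brokenUnaligned would be set via build tags per architecture; assumed here.
const brokenUnaligned = true

// needsClone reports whether buf's first byte is not 4-byte aligned, in which
// case the caller should copy buf before reinterpreting it as a struct.
func needsClone(buf []byte) bool {
	return brokenUnaligned && uintptr(unsafe.Pointer(&buf[0]))&3 != 0
}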
	// Create empty, inline bucket.
@@ -316,7 +316,12 @@ func (b *Bucket) Delete(key []byte) error {

	// Move cursor to correct position.
	c := b.Cursor()
	_, _, flags := c.seek(key)
	k, _, flags := c.seek(key)

	// Return nil if the key doesn't exist.
	if !bytes.Equal(key, k) {
		return nil
	}

	// Return an error if there is already existing bucket value.
	if (flags & bucketLeafFlag) != 0 {
@@ -329,6 +334,28 @@ func (b *Bucket) Delete(key []byte) error {
	return nil
}

// Sequence returns the current integer for the bucket without incrementing it.
func (b *Bucket) Sequence() uint64 { return b.bucket.sequence }

// SetSequence updates the sequence number for the bucket.
func (b *Bucket) SetSequence(v uint64) error {
	if b.tx.db == nil {
		return ErrTxClosed
	} else if !b.Writable() {
		return ErrTxNotWritable
	}

	// Materialize the root node if it hasn't been already so that the
	// bucket will be saved during commit.
	if b.rootNode == nil {
		_ = b.node(b.root, nil)
	}

	// Set the sequence.
	b.bucket.sequence = v
	return nil
}
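SetSequence complements NextSequence by letting a caller seed the counter directly, for example after restoring data. A sketch of how it might be used inside a write transaction (the bucket name and seed value are invented):

err := db.Update(func(tx *bolt.Tx) error {
	b := tx.Bucket([]byte("events")) // hypothetical bucket
	if err := b.SetSequence(1000); err != nil {
		return err
	}
	n, err := b.NextSequence() // n == 1001 after the seed
	_ = n
	return err
})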
// NextSequence returns an autoincrementing integer for the bucket.
func (b *Bucket) NextSequence() (uint64, error) {
	if b.tx.db == nil {
207 cmd/vendor/github.com/boltdb/bolt/db.go → cmd/vendor/github.com/coreos/bbolt/db.go (generated, vendored)
@@ -7,8 +7,7 @@ import (
	"log"
	"os"
	"runtime"
	"runtime/debug"
	"strings"
	"sort"
	"sync"
	"time"
	"unsafe"
@@ -23,6 +22,8 @@ const version = 2
// Represents a marker value to indicate that a file is a Bolt DB.
const magic uint32 = 0xED0CDAED

const pgidNoFreelist pgid = 0xffffffffffffffff

// IgnoreNoSync specifies whether the NoSync field of a DB is ignored when
// syncing changes to a file. This is required as some operating systems,
// such as OpenBSD, do not have a unified buffer cache (UBC) and writes
@@ -39,6 +40,9 @@ const (
// default page size for db is set to the OS page size.
var defaultPageSize = os.Getpagesize()

// The time elapsed between consecutive file locking attempts.
const flockRetryTimeout = 50 * time.Millisecond

// DB represents a collection of buckets persisted to a file on disk.
// All data access is performed through transactions which can be obtained through the DB.
// All the functions on DB will return a ErrDatabaseNotOpen if accessed before Open() is called.
@@ -61,6 +65,11 @@ type DB struct {
	// THIS IS UNSAFE. PLEASE USE WITH CAUTION.
	NoSync bool

	// When true, skips syncing freelist to disk. This improves the database
	// write performance under normal operation, but requires a full database
	// re-sync during recovery.
	NoFreelistSync bool

	// When true, skips the truncate call when growing the database.
	// Setting this to true is only safe on non-ext3/ext4 systems.
	// Skipping truncation avoids preallocation of hard drive space and
@@ -107,9 +116,11 @@ type DB struct {
	opened bool
	rwtx   *Tx
	txs    []*Tx
	freelist *freelist
	stats  Stats

	freelist     *freelist
	freelistLoad sync.Once

	pagePool sync.Pool

	batchMu sync.Mutex
@@ -148,14 +159,17 @@ func (db *DB) String() string {
// If the file does not exist then it will be created automatically.
// Passing in nil options will cause Bolt to open the database with the default options.
func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
	var db = &DB{opened: true}

	db := &DB{
		opened: true,
	}
	// Set default options if no options are provided.
	if options == nil {
		options = DefaultOptions
	}
	db.NoSync = options.NoSync
	db.NoGrowSync = options.NoGrowSync
	db.MmapFlags = options.MmapFlags
	db.NoFreelistSync = options.NoFreelistSync

	// Set default values for later DB operations.
	db.MaxBatchSize = DefaultMaxBatchSize
@@ -184,6 +198,7 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
	// The database file is locked using the shared lock (more than one process may
	// hold a lock at the same time) otherwise (options.ReadOnly is set).
	if err := flock(db, mode, !db.readOnly, options.Timeout); err != nil {
		db.lockfile = nil // make 'unused' happy. TODO: rework locks
		_ = db.close()
		return nil, err
	}
@@ -191,6 +206,11 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
	// Default values for test hooks
	db.ops.writeAt = db.file.WriteAt

	if db.pageSize = options.PageSize; db.pageSize == 0 {
		// Set the default page size to the OS page size.
		db.pageSize = defaultPageSize
	}

	// Initialize the database if it doesn't exist.
	if info, err := db.file.Stat(); err != nil {
		return nil, err
@@ -202,20 +222,21 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
	} else {
		// Read the first meta page to determine the page size.
		var buf [0x1000]byte
		if _, err := db.file.ReadAt(buf[:], 0); err == nil {
			m := db.pageInBuffer(buf[:], 0).meta()
			if err := m.validate(); err != nil {
				// If we can't read the page size, we can assume it's the same
				// as the OS -- since that's how the page size was chosen in the
				// first place.
				//
				// If the first page is invalid and this OS uses a different
				// page size than what the database was created with then we
				// are out of luck and cannot access the database.
				db.pageSize = os.Getpagesize()
			} else {
		// If we can't read the page size, but can read a page, assume
		// it's the same as the OS or one given -- since that's how the
		// page size was chosen in the first place.
		//
		// If the first page is invalid and this OS uses a different
		// page size than what the database was created with then we
		// are out of luck and cannot access the database.
		//
		// TODO: scan for next page
		if bw, err := db.file.ReadAt(buf[:], 0); err == nil && bw == len(buf) {
			if m := db.pageInBuffer(buf[:], 0).meta(); m.validate() == nil {
				db.pageSize = int(m.pageSize)
			}
		} else {
			return nil, ErrInvalid
		}
	}

@@ -232,14 +253,50 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
		return nil, err
	}

	// Read in the freelist.
	db.freelist = newFreelist()
	db.freelist.read(db.page(db.meta().freelist))
	if db.readOnly {
		return db, nil
	}

	db.loadFreelist()

	// Flush freelist when transitioning from no sync to sync so
	// NoFreelistSync unaware boltdb can open the db later.
	if !db.NoFreelistSync && !db.hasSyncedFreelist() {
		tx, err := db.Begin(true)
		if tx != nil {
			err = tx.Commit()
		}
		if err != nil {
			_ = db.close()
			return nil, err
		}
	}

	// Mark the database as opened and return.
	return db, nil
}

// loadFreelist reads the freelist if it is synced, or reconstructs it
// by scanning the DB if it is not synced. It assumes there are no
// concurrent accesses being made to the freelist.
func (db *DB) loadFreelist() {
	db.freelistLoad.Do(func() {
		db.freelist = newFreelist()
		if !db.hasSyncedFreelist() {
			// Reconstruct free list by scanning the DB.
			db.freelist.readIDs(db.freepages())
		} else {
			// Read free list from freelist page.
			db.freelist.read(db.page(db.meta().freelist))
		}
		db.stats.FreePageN = len(db.freelist.ids)
	})
}

func (db *DB) hasSyncedFreelist() bool {
	return db.meta().freelist != pgidNoFreelist
}
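pgidNoFreelist, Options.NoFreelistSync, and loadFreelist work together: with the option set, commits stop persisting the freelist and the next open rebuilds it by scanning for unreachable pages. A sketch of opting in, assuming the coreos/bbolt import path this vendoring uses:

import (
	"time"

	bolt "github.com/coreos/bbolt"
)

// openFast trades slower recovery (a full page scan in loadFreelist on the
// next open) for faster commits that skip the freelist write entirely.
func openFast(path string) (*bolt.DB, error) {
	return bolt.Open(path, 0600, &bolt.Options{
		Timeout:        time.Second,
		NoFreelistSync: true,
	})
}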
// mmap opens the underlying memory-mapped file and initializes the meta references.
// minsz is the minimum size that the new mmap can be.
func (db *DB) mmap(minsz int) error {
@@ -341,9 +398,6 @@ func (db *DB) mmapSize(size int) (int, error) {

// init creates a new database file and initializes its meta pages.
func (db *DB) init() error {
	// Set the page size to the OS page size.
	db.pageSize = os.Getpagesize()

	// Create two meta pages on a buffer.
	buf := make([]byte, db.pageSize*4)
	for i := 0; i < 2; i++ {
@@ -526,21 +580,36 @@ func (db *DB) beginRWTx() (*Tx, error) {
	t := &Tx{writable: true}
	t.init(db)
	db.rwtx = t
	db.freePages()
	return t, nil
}

	// Free any pages associated with closed read-only transactions.
	var minid txid = 0xFFFFFFFFFFFFFFFF
	for _, t := range db.txs {
		if t.meta.txid < minid {
			minid = t.meta.txid
		}
// freePages releases any pages associated with closed read-only transactions.
func (db *DB) freePages() {
	// Free all pending pages prior to earliest open transaction.
	sort.Sort(txsById(db.txs))
	minid := txid(0xFFFFFFFFFFFFFFFF)
	if len(db.txs) > 0 {
		minid = db.txs[0].meta.txid
	}
	if minid > 0 {
		db.freelist.release(minid - 1)
	}

	return t, nil
	// Release unused txid extents.
	for _, t := range db.txs {
		db.freelist.releaseRange(minid, t.meta.txid-1)
		minid = t.meta.txid + 1
	}
	db.freelist.releaseRange(minid, txid(0xFFFFFFFFFFFFFFFF))
	// Any page both allocated and freed in an extent is safe to release.
}

type txsById []*Tx

func (t txsById) Len() int           { return len(t) }
func (t txsById) Swap(i, j int)      { t[i], t[j] = t[j], t[i] }
func (t txsById) Less(i, j int) bool { return t[i].meta.txid < t[j].meta.txid }
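The extent arithmetic deserves an example. With the read transactions sorted by txid, freePages carves the txid space into gaps between consecutive readers; a pending page is only reclaimable early if it was both allocated and freed inside one gap, since then no open reader can reference it. A walkthrough with invented txids:

// Open read-only txids after sorting: 7 and 12.
//
//   db.freelist.release(6)            // all pages freed at txid <= 6
//   db.freelist.releaseRange(8, 11)   // gap between the two readers
//   db.freelist.releaseRange(13, max) // gap after the newest reader
//
// A page allocated in tx 9 and freed in tx 10 falls inside [8,11]: neither
// reader 7 nor reader 12 can ever observe it, so it is safe to reuse now.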
// removeTx removes a transaction from the database.
func (db *DB) removeTx(tx *Tx) {
	// Release the read lock on the mmap.
@@ -552,7 +621,10 @@ func (db *DB) removeTx(tx *Tx) {
	// Remove the transaction.
	for i, t := range db.txs {
		if t == tx {
			db.txs = append(db.txs[:i], db.txs[i+1:]...)
			last := len(db.txs) - 1
			db.txs[i] = db.txs[last]
			db.txs[last] = nil
			db.txs = db.txs[:last]
			break
		}
	}
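The new removal is the standard unordered swap-delete: O(1) instead of shifting the tail, and nilling the vacated slot lets the finished *Tx be garbage collected. The idiom in isolation (using *Tx for concreteness):

// deleteUnordered removes s[i] in O(1) when element order is irrelevant.
func deleteUnordered(s []*Tx, i int) []*Tx {
	last := len(s) - 1
	s[i] = s[last]
	s[last] = nil // drop the reference so the GC can collect the Tx
	return s[:last]
}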
@@ -630,11 +702,7 @@ func (db *DB) View(fn func(*Tx) error) error {
		return err
	}

	if err := t.Rollback(); err != nil {
		return err
	}

	return nil
	return t.Rollback()
}

// Batch calls fn as part of a batch. It behaves similar to Update,
@@ -823,7 +891,7 @@ func (db *DB) meta() *meta {
}

// allocate returns a contiguous block of memory starting at a given page.
func (db *DB) allocate(count int) (*page, error) {
func (db *DB) allocate(txid txid, count int) (*page, error) {
	// Allocate a temporary buffer for the page.
	var buf []byte
	if count == 1 {
@@ -835,7 +903,7 @@ func (db *DB) allocate(count int) (*page, error) {
	p.overflow = uint32(count - 1)

	// Use pages from the freelist if they are available.
	if p.id = db.freelist.allocate(count); p.id != 0 {
	if p.id = db.freelist.allocate(txid, count); p.id != 0 {
		return p, nil
	}

@@ -890,6 +958,38 @@ func (db *DB) IsReadOnly() bool {
	return db.readOnly
}

func (db *DB) freepages() []pgid {
	tx, err := db.beginTx()
	defer func() {
		err = tx.Rollback()
		if err != nil {
			panic("freepages: failed to rollback tx")
		}
	}()
	if err != nil {
		panic("freepages: failed to open read only tx")
	}

	reachable := make(map[pgid]*page)
	nofreed := make(map[pgid]bool)
	ech := make(chan error)
	go func() {
		for e := range ech {
			panic(fmt.Sprintf("freepages: failed to get all reachable pages (%v)", e))
		}
	}()
	tx.checkBucket(&tx.root, reachable, nofreed, ech)
	close(ech)

	var fids []pgid
	for i := pgid(2); i < db.meta().pgid; i++ {
		if _, ok := reachable[i]; !ok {
			fids = append(fids, i)
		}
	}
	return fids
}

// Options represents the options that can be set when opening a database.
type Options struct {
	// Timeout is the amount of time to wait to obtain a file lock.
@@ -900,6 +1000,10 @@ type Options struct {
	// Sets the DB.NoGrowSync flag before memory mapping the file.
	NoGrowSync bool

	// Do not sync freelist to disk. This improves the database write performance
	// under normal operation, but requires a full database re-sync during recovery.
	NoFreelistSync bool

	// Open database in read-only mode. Uses flock(..., LOCK_SH |LOCK_NB) to
	// grab a shared lock (UNIX).
	ReadOnly bool
@@ -916,6 +1020,14 @@ type Options struct {
	// If initialMmapSize is smaller than the previous database size,
	// it takes no effect.
	InitialMmapSize int

	// PageSize overrides the default OS page size.
	PageSize int

	// NoSync sets the initial value of DB.NoSync. Normally this can just be
	// set directly on the DB itself when returned from Open(), but this option
	// is useful in APIs which expose Options but not the underlying DB.
	NoSync bool
}

// DefaultOptions represent the options used if nil options are passed into Open().
@@ -952,15 +1064,11 @@ func (s *Stats) Sub(other *Stats) Stats {
	diff.PendingPageN = s.PendingPageN
	diff.FreeAlloc = s.FreeAlloc
	diff.FreelistInuse = s.FreelistInuse
	diff.TxN = other.TxN - s.TxN
	diff.TxN = s.TxN - other.TxN
	diff.TxStats = s.TxStats.Sub(&other.TxStats)
	return diff
}
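The Sub fix restores the intended orientation: s is the newer snapshot and other the older baseline, so the delta comes out positive. For example:

prev := db.Stats() // suppose prev.TxN == 100
// ... more read transactions complete ...
cur := db.Stats() // suppose cur.TxN == 140
diff := cur.Sub(&prev)
// diff.TxN == 40 with the fix; the old code produced -40.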
func (s *Stats) add(other *Stats) {
	s.TxStats.add(&other.TxStats)
}

type Info struct {
	Data     uintptr
	PageSize int
@@ -999,7 +1107,8 @@ func (m *meta) copy(dest *meta) {
func (m *meta) write(p *page) {
	if m.root.root >= m.pgid {
		panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid))
	} else if m.freelist >= m.pgid {
	} else if m.freelist >= m.pgid && m.freelist != pgidNoFreelist {
		// TODO: reject pgidNoFreeList if !NoFreelistSync
		panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid))
	}

@@ -1026,11 +1135,3 @@ func _assert(condition bool, msg string, v ...interface{}) {
		panic(fmt.Sprintf("assertion failed: "+msg, v...))
	}
}

func warn(v ...interface{})              { fmt.Fprintln(os.Stderr, v...) }
func warnf(msg string, v ...interface{}) { fmt.Fprintf(os.Stderr, msg+"\n", v...) }

func printstack() {
	stack := strings.Join(strings.Split(string(debug.Stack()), "\n")[2:], "\n")
	fmt.Fprintln(os.Stderr, stack)
}
0 cmd/vendor/github.com/boltdb/bolt/doc.go → cmd/vendor/github.com/coreos/bbolt/doc.go (generated, vendored)
@@ -6,25 +6,40 @@ import (
	"unsafe"
)

// txPending holds a list of pgids and corresponding allocation txns
// that are pending to be freed.
type txPending struct {
	ids              []pgid
	alloctx          []txid // txids allocating the ids
	lastReleaseBegin txid   // beginning txid of last matching releaseRange
}

// freelist represents a list of all pages that are available for allocation.
// It also tracks pages that have been freed but are still in use by open transactions.
type freelist struct {
	ids     []pgid          // all free and available free page ids.
	pending map[txid][]pgid // mapping of soon-to-be free page ids by tx.
	cache   map[pgid]bool   // fast lookup of all free and pending page ids.
	ids     []pgid              // all free and available free page ids.
	allocs  map[pgid]txid       // mapping of txid that allocated a pgid.
	pending map[txid]*txPending // mapping of soon-to-be free page ids by tx.
	cache   map[pgid]bool       // fast lookup of all free and pending page ids.
}

// newFreelist returns an empty, initialized freelist.
func newFreelist() *freelist {
	return &freelist{
		pending: make(map[txid][]pgid),
		allocs:  make(map[pgid]txid),
		pending: make(map[txid]*txPending),
		cache:   make(map[pgid]bool),
	}
}

// size returns the size of the page after serialization.
func (f *freelist) size() int {
	return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * f.count())
	n := f.count()
	if n >= 0xFFFF {
		// The first element will be used to store the count. See freelist.write.
		n++
	}
	return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * n)
}
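The size change accounts for the overflow encoding used by freelist.write below: once the count reaches 0xFFFF, one extra pgid slot stores the true length. A quick check with an 8-byte pgid and a 16-byte page header (typical 64-bit values, assumed here):

// n = 70000 ids exceeds the uint16 page.count limit, so write spends one
// extra element on the real count:
//   size = 16 + 8*(70000+1) = 560024 bytes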
// count returns count of pages on the freelist
@@ -40,27 +55,26 @@ func (f *freelist) free_count() int {
// pending_count returns count of pending pages
func (f *freelist) pending_count() int {
	var count int
	for _, list := range f.pending {
		count += len(list)
	for _, txp := range f.pending {
		count += len(txp.ids)
	}
	return count
}

// all returns a list of all free ids and all pending ids in one sorted list.
func (f *freelist) all() []pgid {
	m := make(pgids, 0)

	for _, list := range f.pending {
		m = append(m, list...)
// copyall copies into dst a list of all free ids and all pending ids in one sorted list.
// f.count returns the minimum length required for dst.
func (f *freelist) copyall(dst []pgid) {
	m := make(pgids, 0, f.pending_count())
	for _, txp := range f.pending {
		m = append(m, txp.ids...)
	}

	sort.Sort(m)
	return pgids(f.ids).merge(m)
	mergepgids(dst, f.ids, m)
}

// allocate returns the starting page id of a contiguous list of pages of a given size.
// If a contiguous block cannot be found then 0 is returned.
func (f *freelist) allocate(n int) pgid {
func (f *freelist) allocate(txid txid, n int) pgid {
	if len(f.ids) == 0 {
		return 0
	}
@@ -93,7 +107,7 @@ func (f *freelist) allocate(n int) pgid {
	for i := pgid(0); i < pgid(n); i++ {
		delete(f.cache, initial+i)
	}

	f.allocs[initial] = txid
	return initial
}
@@ -110,28 +124,73 @@ func (f *freelist) free(txid txid, p *page) {
	}

	// Free page and all its overflow pages.
	var ids = f.pending[txid]
	txp := f.pending[txid]
	if txp == nil {
		txp = &txPending{}
		f.pending[txid] = txp
	}
	allocTxid, ok := f.allocs[p.id]
	if ok {
		delete(f.allocs, p.id)
	} else if (p.flags & freelistPageFlag) != 0 {
		// Freelist is always allocated by prior tx.
		allocTxid = txid - 1
	}

	for id := p.id; id <= p.id+pgid(p.overflow); id++ {
		// Verify that page is not already free.
		if f.cache[id] {
			panic(fmt.Sprintf("page %d already freed", id))
		}

		// Add to the freelist and cache.
		ids = append(ids, id)
		txp.ids = append(txp.ids, id)
		txp.alloctx = append(txp.alloctx, allocTxid)
		f.cache[id] = true
	}
	f.pending[txid] = ids
}

// release moves all page ids for a transaction id (or older) to the freelist.
func (f *freelist) release(txid txid) {
	m := make(pgids, 0)
	for tid, ids := range f.pending {
	for tid, txp := range f.pending {
		if tid <= txid {
			// Move transaction's pending pages to the available freelist.
			// Don't remove from the cache since the page is still free.
			m = append(m, ids...)
			m = append(m, txp.ids...)
			delete(f.pending, tid)
		}
	}
	sort.Sort(m)
	f.ids = pgids(f.ids).merge(m)
}

// releaseRange moves pending pages allocated within an extent [begin,end] to the free list.
func (f *freelist) releaseRange(begin, end txid) {
	if begin > end {
		return
	}
	var m pgids
	for tid, txp := range f.pending {
		if tid < begin || tid > end {
			continue
		}
		// Don't recompute freed pages if ranges haven't updated.
		if txp.lastReleaseBegin == begin {
			continue
		}
		for i := 0; i < len(txp.ids); i++ {
			if atx := txp.alloctx[i]; atx < begin || atx > end {
				continue
			}
			m = append(m, txp.ids[i])
			txp.ids[i] = txp.ids[len(txp.ids)-1]
			txp.ids = txp.ids[:len(txp.ids)-1]
			txp.alloctx[i] = txp.alloctx[len(txp.alloctx)-1]
			txp.alloctx = txp.alloctx[:len(txp.alloctx)-1]
			i--
		}
		txp.lastReleaseBegin = begin
		if len(txp.ids) == 0 {
			delete(f.pending, tid)
		}
	}
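A compact trace of releaseRange's per-page filter (all values invented):

// pending[tid=15]: ids = [40, 41], alloctx = [9, 13]
// releaseRange(begin=12, end=16):
//   page 40: alloctx 9 < begin   -> stays pending; an older reader may see it
//   page 41: alloctx 13 in range -> moved to the free list (swap-deleted)
// lastReleaseBegin = 12, so an identical call later short-circuits.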
@@ -142,12 +201,29 @@ func (f *freelist) release(txid txid) {
// rollback removes the pages from a given pending tx.
func (f *freelist) rollback(txid txid) {
	// Remove page ids from cache.
	for _, id := range f.pending[txid] {
		delete(f.cache, id)
	txp := f.pending[txid]
	if txp == nil {
		return
	}

	// Remove pages from pending list.
	var m pgids
	for i, pgid := range txp.ids {
		delete(f.cache, pgid)
		tx := txp.alloctx[i]
		if tx == 0 {
			continue
		}
		if tx != txid {
			// Pending free aborted; restore page back to alloc list.
			f.allocs[pgid] = tx
		} else {
			// Freed page was allocated by this txn; OK to throw away.
			m = append(m, pgid)
		}
	}
	// Remove pages from pending list and mark as free if allocated by txid.
	delete(f.pending, txid)
	sort.Sort(m)
	f.ids = pgids(f.ids).merge(m)
}
// freed returns whether a given page is in the free list.
@@ -157,6 +233,9 @@ func (f *freelist) freed(pgid pgid) bool {

// read initializes the freelist from a freelist page.
func (f *freelist) read(p *page) {
	if (p.flags & freelistPageFlag) == 0 {
		panic(fmt.Sprintf("invalid freelist page: %d, page type is %s", p.id, p.typ()))
	}
	// If the page.count is at the max uint16 value (64k) then it's considered
	// an overflow and the size of the freelist is stored as the first element.
	idx, count := 0, int(p.count)
@@ -169,7 +248,7 @@ func (f *freelist) read(p *page) {
	if count == 0 {
		f.ids = nil
	} else {
		ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx:count]
		ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx : idx+count]
		f.ids = make([]pgid, len(ids))
		copy(f.ids, ids)

@@ -181,27 +260,33 @@ func (f *freelist) read(p *page) {
	f.reindex()
}

// readIDs initializes the freelist from a given list of ids.
func (f *freelist) readIDs(ids []pgid) {
	f.ids = ids
	f.reindex()
}

// write writes the page ids onto a freelist page. All free and pending ids are
// saved to disk since in the event of a program crash, all pending ids will
// become free.
func (f *freelist) write(p *page) error {
	// Combine the old free pgids and pgids waiting on an open transaction.
	ids := f.all()

	// Update the header flag.
	p.flags |= freelistPageFlag

	// The page.count can only hold up to 64k elements so if we overflow that
	// number then we handle it by putting the size in the first element.
	if len(ids) == 0 {
		p.count = uint16(len(ids))
	} else if len(ids) < 0xFFFF {
		p.count = uint16(len(ids))
		copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:], ids)
	lenids := f.count()
	if lenids == 0 {
		p.count = uint16(lenids)
	} else if lenids < 0xFFFF {
		p.count = uint16(lenids)
		f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:])
	} else {
		p.count = 0xFFFF
		((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(len(ids))
		copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:], ids)
		((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(lenids)
		f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:])
	}

	return nil
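read and write share this small wire format: page.count carries the id count unless it saturates at 0xFFFF, in which case the first pgid slot holds the real count. A sketch of the decoding step in isolation (uint64 stands in for pgid):

// decodeFreelistCount returns the index of the first id and the id count,
// given a freelist page's count field and its pgid data area.
func decodeFreelistCount(count uint16, data []uint64) (idx, n int) {
	if count == 0xFFFF {
		return 1, int(data[0]) // overflowed: element 0 stores the length
	}
	return 0, int(count)
}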
@@ -213,8 +298,8 @@ func (f *freelist) reload(p *page) {

	// Build a cache of only pending pages.
	pcache := make(map[pgid]bool)
	for _, pendingIDs := range f.pending {
		for _, pendingID := range pendingIDs {
	for _, txp := range f.pending {
		for _, pendingID := range txp.ids {
			pcache[pendingID] = true
		}
	}
@@ -236,12 +321,12 @@ func (f *freelist) reload(p *page) {

// reindex rebuilds the free cache based on available and pending free lists.
func (f *freelist) reindex() {
	f.cache = make(map[pgid]bool)
	f.cache = make(map[pgid]bool, len(f.ids))
	for _, id := range f.ids {
		f.cache[id] = true
	}
	for _, pendingIDs := range f.pending {
		for _, pendingID := range pendingIDs {
	for _, txp := range f.pending {
		for _, pendingID := range txp.ids {
			f.cache[pendingID] = true
		}
	}
@@ -365,7 +365,7 @@ func (n *node) spill() error {
	}

	// Allocate contiguous space for the node.
	p, err := tx.allocate((node.size() / tx.db.pageSize) + 1)
	p, err := tx.allocate((node.size() + tx.db.pageSize - 1) / tx.db.pageSize)
	if err != nil {
		return err
	}
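The spill change replaces n/size+1 with an exact ceiling division; the old form allocated a spare page whenever node.size() was already a multiple of the page size:

// pageSize = 4096, node.size() = 8192 (exactly two pages):
//   old: 8192/4096 + 1        == 3 pages (one wasted)
//   new: (8192 + 4095) / 4096 == 2 pages (exact)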
31 cmd/vendor/github.com/boltdb/bolt/page.go → cmd/vendor/github.com/coreos/bbolt/page.go (generated, vendored)
@@ -145,12 +145,33 @@ func (a pgids) merge(b pgids) pgids {
	// Return the opposite slice if one is nil.
	if len(a) == 0 {
		return b
	} else if len(b) == 0 {
	}
	if len(b) == 0 {
		return a
	}
	merged := make(pgids, len(a)+len(b))
	mergepgids(merged, a, b)
	return merged
}

	// Create a list to hold all elements from both lists.
	merged := make(pgids, 0, len(a)+len(b))
// mergepgids copies the sorted union of a and b into dst.
// If dst is too small, it panics.
func mergepgids(dst, a, b pgids) {
	if len(dst) < len(a)+len(b) {
		panic(fmt.Errorf("mergepgids bad len %d < %d + %d", len(dst), len(a), len(b)))
	}
	// Copy in the opposite slice if one is nil.
	if len(a) == 0 {
		copy(dst, b)
		return
	}
	if len(b) == 0 {
		copy(dst, a)
		return
	}

	// Merged will hold all elements from both lists.
	merged := dst[:0]

	// Assign lead to the slice with a lower starting value, follow to the higher value.
	lead, follow := a, b
@@ -172,7 +193,5 @@ func (a pgids) merge(b pgids) pgids {
	}

	// Append what's left in follow.
	merged = append(merged, follow...)

	return merged
	_ = append(merged, follow...)
}
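mergepgids is now the allocation-free core shared by merge and freelist.copyall: the caller provides a destination of at least len(a)+len(b), and the sorted union is written in place. A usage sketch (in bbolt the two inputs are disjoint in practice):

a := pgids{1, 4, 9}
b := pgids{2, 5, 10}
dst := make(pgids, len(a)+len(b))
mergepgids(dst, a, b) // dst == [1 2 4 5 9 10], no intermediate slice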
81 cmd/vendor/github.com/boltdb/bolt/tx.go → cmd/vendor/github.com/coreos/bbolt/tx.go (generated, vendored)
@@ -126,10 +126,7 @@ func (tx *Tx) DeleteBucket(name []byte) error {
// the error is returned to the caller.
func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error {
	return tx.root.ForEach(func(k, v []byte) error {
		if err := fn(k, tx.root.Bucket(k)); err != nil {
			return err
		}
		return nil
		return fn(k, tx.root.Bucket(k))
	})
}
@@ -169,28 +166,18 @@ func (tx *Tx) Commit() error {
	// Free the old root bucket.
	tx.meta.root.root = tx.root.root

	opgid := tx.meta.pgid

	// Free the freelist and allocate new pages for it. This will overestimate
	// the size of the freelist but not underestimate the size (which would be bad).
	tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist))
	p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1)
	if err != nil {
		tx.rollback()
		return err
	// Free the old freelist because commit writes out a fresh freelist.
	if tx.meta.freelist != pgidNoFreelist {
		tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist))
	}
	if err := tx.db.freelist.write(p); err != nil {
		tx.rollback()
		return err
	}
	tx.meta.freelist = p.id

	// If the high water mark has moved up then attempt to grow the database.
	if tx.meta.pgid > opgid {
		if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil {
			tx.rollback()
	if !tx.db.NoFreelistSync {
		err := tx.commitFreelist()
		if err != nil {
			return err
		}
	} else {
		tx.meta.freelist = pgidNoFreelist
	}

	// Write dirty pages to disk.
@@ -235,6 +222,31 @@ func (tx *Tx) Commit() error {
	return nil
}

func (tx *Tx) commitFreelist() error {
	// Allocate new pages for the new free list. This will overestimate
	// the size of the freelist but not underestimate the size (which would be bad).
	opgid := tx.meta.pgid
	p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1)
	if err != nil {
		tx.rollback()
		return err
	}
	if err := tx.db.freelist.write(p); err != nil {
		tx.rollback()
		return err
	}
	tx.meta.freelist = p.id
	// If the high water mark has moved up then attempt to grow the database.
	if tx.meta.pgid > opgid {
		if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil {
			tx.rollback()
			return err
		}
	}

	return nil
}
// Rollback closes the transaction and ignores all previous updates. Read-only
// transactions must be rolled back and not committed.
func (tx *Tx) Rollback() error {
@@ -305,7 +317,11 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
	if err != nil {
		return 0, err
	}
	defer func() { _ = f.Close() }()
	defer func() {
		if cerr := f.Close(); err == nil {
			err = cerr
		}
	}()
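The rewritten defer is the standard Go idiom for surfacing a Close failure through a named error return without clobbering an earlier error. In isolation:

func copyFile(dst io.Writer, path string) (err error) {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer func() {
		// Report the Close error only if nothing failed earlier.
		if cerr := f.Close(); err == nil {
			err = cerr
		}
	}()
	_, err = io.Copy(dst, f)
	return err
}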
	// Generate a meta page. We use the same page data for both meta pages.
	buf := make([]byte, tx.db.pageSize)
@@ -333,7 +349,7 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
	}

	// Move past the meta pages in the file.
	if _, err := f.Seek(int64(tx.db.pageSize*2), os.SEEK_SET); err != nil {
	if _, err := f.Seek(int64(tx.db.pageSize*2), io.SeekStart); err != nil {
		return n, fmt.Errorf("seek: %s", err)
	}

@@ -344,7 +360,7 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
		return n, err
	}

	return n, f.Close()
	return n, nil
}
// CopyFile copies the entire database to file at the given path.
@@ -379,9 +395,14 @@ func (tx *Tx) Check() <-chan error {
}

func (tx *Tx) check(ch chan error) {
	// Force loading free list if opened in ReadOnly mode.
	tx.db.loadFreelist()

	// Check if any pages are double freed.
	freed := make(map[pgid]bool)
	for _, id := range tx.db.freelist.all() {
	all := make([]pgid, tx.db.freelist.count())
	tx.db.freelist.copyall(all)
	for _, id := range all {
		if freed[id] {
			ch <- fmt.Errorf("page %d: already freed", id)
		}
@@ -392,8 +413,10 @@ func (tx *Tx) check(ch chan error) {
	reachable := make(map[pgid]*page)
	reachable[0] = tx.page(0) // meta0
	reachable[1] = tx.page(1) // meta1
	for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ {
		reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist)
	if tx.meta.freelist != pgidNoFreelist {
		for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ {
			reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist)
		}
	}

	// Recursively check buckets.
@@ -451,7 +474,7 @@ func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bo
// allocate returns a contiguous block of memory starting at a given page.
func (tx *Tx) allocate(count int) (*page, error) {
	p, err := tx.db.allocate(tx.meta.txid, count)
	if err != nil {
		return nil, err
	}
@@ -74,7 +74,7 @@ func SRVGetCluster(name, dns string, defaultToken string, apurls types.URLs) (st
	shortHost := strings.TrimSuffix(srv.Target, ".")
	urlHost := net.JoinHostPort(shortHost, port)
	stringParts = append(stringParts, fmt.Sprintf("%s=%s://%s", n, scheme, urlHost))
	plog.Noticef("got bootstrap from DNS for %s at %s%s", service, scheme, urlHost)
	plog.Noticef("got bootstrap from DNS for %s at %s://%s", service, scheme, urlHost)
	if ok && url.Scheme != scheme {
		plog.Errorf("bootstrap at %s from DNS for %s has scheme mismatch with expected peer %s", scheme+"://"+urlHost, service, url.String())
	}
@@ -23,6 +23,7 @@ import (

	"github.com/coreos/etcd/pkg/fileutil"
	"github.com/coreos/etcd/pkg/testutil"
	"github.com/coreos/etcd/version"
)

// TestReleaseUpgrade ensures that changes to the master branch do not affect
@@ -53,7 +54,7 @@ func TestReleaseUpgrade(t *testing.T) {
	// so there's a window at boot time where it doesn't have V3rpcCapability enabled
	// poll /version until etcdcluster is >2.3.x before making v3 requests
	for i := 0; i < 7; i++ {
		if err = cURLGet(epc, cURLReq{endpoint: "/version", expected: `"etcdcluster":"3.0`}); err != nil {
		if err = cURLGet(epc, cURLReq{endpoint: "/version", expected: `"etcdcluster":"` + version.Cluster(version.Version)}); err != nil {
			t.Logf("#%d: v3 is not ready yet (%v)", i, err)
			time.Sleep(time.Second)
			continue
@@ -55,20 +55,12 @@ var (
	DefaultInitialAdvertisePeerURLs = "http://localhost:2380"
	DefaultAdvertiseClientURLs      = "http://localhost:2379"

	defaultHostname   string = "localhost"
	defaultHostname   string
	defaultHostStatus error
)

func init() {
	ip, err := netutil.GetDefaultHost()
	if err != nil {
		defaultHostStatus = err
		return
	}
	// found default host, advertise on it
	DefaultInitialAdvertisePeerURLs = "http://" + ip + ":2380"
	DefaultAdvertiseClientURLs = "http://" + ip + ":2379"
	defaultHostname = ip
	defaultHostname, defaultHostStatus = netutil.GetDefaultHost()
}

// Config holds the arguments for configuring an etcd server.
@@ -237,6 +229,9 @@ func (cfg *configYAML) configFromFile(path string) error {
		cfg.ACUrls = []url.URL(u)
	}

	if (cfg.Durl != "" || cfg.DNSCluster != "") && cfg.InitialCluster == cfg.InitialClusterFromName(cfg.Name) {
		cfg.InitialCluster = ""
	}
	if cfg.ClusterState == "" {
		cfg.ClusterState = ClusterStateFlagNew
	}
@@ -346,34 +341,52 @@ func (cfg Config) InitialClusterFromName(name string) (ret string) {
func (cfg Config) IsNewCluster() bool { return cfg.ClusterState == ClusterStateFlagNew }
func (cfg Config) ElectionTicks() int { return int(cfg.ElectionMs / cfg.TickMs) }

// IsDefaultHost returns the default hostname, if used, and the error, if any,
// from getting the machine's default host.
func (cfg Config) IsDefaultHost() (string, error) {
	if len(cfg.APUrls) == 1 && cfg.APUrls[0].String() == DefaultInitialAdvertisePeerURLs {
		return defaultHostname, defaultHostStatus
	}
	if len(cfg.ACUrls) == 1 && cfg.ACUrls[0].String() == DefaultAdvertiseClientURLs {
		return defaultHostname, defaultHostStatus
	}
	return "", defaultHostStatus
func (cfg Config) defaultPeerHost() bool {
	return len(cfg.APUrls) == 1 && cfg.APUrls[0].String() == DefaultInitialAdvertisePeerURLs
}

// UpdateDefaultClusterFromName updates cluster advertise URLs with default host.
func (cfg Config) defaultClientHost() bool {
	return len(cfg.ACUrls) == 1 && cfg.ACUrls[0].String() == DefaultAdvertiseClientURLs
}

// UpdateDefaultClusterFromName updates cluster advertise URLs with, if available, default host,
// if advertise URLs are default values(localhost:2379,2380) AND if listen URL is 0.0.0.0.
// e.g. advertise peer URL localhost:2380 or listen peer URL 0.0.0.0:2380
// then the advertise peer host would be updated with machine's default host,
// while keeping the listen URL's port.
// User can work around this by explicitly setting URL with 127.0.0.1.
// It returns the default hostname, if used, and the error, if any, from getting the machine's default host.
// TODO: check whether fields are set instead of whether fields have default value
func (cfg *Config) UpdateDefaultClusterFromName(defaultInitialCluster string) {
	defaultHost, defaultHostErr := cfg.IsDefaultHost()
	defaultHostOverride := defaultHost == "" || defaultHostErr == nil
	if (defaultHostOverride || cfg.Name != DefaultName) && cfg.InitialCluster == defaultInitialCluster {
		cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name)
		ip, _, _ := net.SplitHostPort(cfg.LCUrls[0].Host)
		// if client-listen-url is 0.0.0.0, just use detected default host
		// otherwise, rewrite advertise-client-url with localhost
		if ip != "0.0.0.0" {
			_, acPort, _ := net.SplitHostPort(cfg.ACUrls[0].Host)
			cfg.ACUrls[0] = url.URL{Scheme: cfg.ACUrls[0].Scheme, Host: fmt.Sprintf("localhost:%s", acPort)}
func (cfg *Config) UpdateDefaultClusterFromName(defaultInitialCluster string) (string, error) {
	if defaultHostname == "" || defaultHostStatus != nil {
		// update 'initial-cluster' when only the name is specified (e.g. 'etcd --name=abc')
		if cfg.Name != DefaultName && cfg.InitialCluster == defaultInitialCluster {
			cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name)
		}
		return "", defaultHostStatus
	}

	used := false
	pip, pport, _ := net.SplitHostPort(cfg.LPUrls[0].Host)
	if cfg.defaultPeerHost() && pip == "0.0.0.0" {
		cfg.APUrls[0] = url.URL{Scheme: cfg.APUrls[0].Scheme, Host: fmt.Sprintf("%s:%s", defaultHostname, pport)}
		used = true
	}
	// update 'initial-cluster' when only the name is specified (e.g. 'etcd --name=abc')
	if cfg.Name != DefaultName && cfg.InitialCluster == defaultInitialCluster {
		cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name)
	}

	cip, cport, _ := net.SplitHostPort(cfg.LCUrls[0].Host)
	if cfg.defaultClientHost() && cip == "0.0.0.0" {
		cfg.ACUrls[0] = url.URL{Scheme: cfg.ACUrls[0].Scheme, Host: fmt.Sprintf("%s:%s", defaultHostname, cport)}
		used = true
	}
	dhost := defaultHostname
	if !used {
		dhost = ""
	}
	return dhost, defaultHostStatus
}
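Concretely, assuming a detected default host of 10.0.1.5 (an invented value), the rewrite fires only when the advertise URL is still the default and the matching listen URL binds 0.0.0.0:

// etcd --name infra1 --listen-peer-urls http://0.0.0.0:2380
//   advertise peer URL: http://localhost:2380 -> http://10.0.1.5:2380
//   initial-cluster:    infra1=http://10.0.1.5:2380
//
// etcd --name infra1 --listen-peer-urls http://127.0.0.1:2380
//   advertise peer URL: unchanged; the listen host is explicit, not 0.0.0.0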
// checkBindURLs returns an error if any URL uses a domain name.
@@ -15,11 +15,15 @@
package embed

import (
	"fmt"
	"io/ioutil"
	"net"
	"net/url"
	"os"
	"testing"

	"github.com/coreos/etcd/pkg/transport"

	"github.com/ghodss/yaml"
)

@@ -61,6 +65,70 @@ func TestConfigFileOtherFields(t *testing.T) {
	}
}

// TestUpdateDefaultClusterFromName ensures that etcd can start with 'etcd --name=abc'.
func TestUpdateDefaultClusterFromName(t *testing.T) {
	cfg := NewConfig()
	defaultInitialCluster := cfg.InitialCluster
	oldscheme := cfg.APUrls[0].Scheme
	origpeer := cfg.APUrls[0].String()
	origadvc := cfg.ACUrls[0].String()

	cfg.Name = "abc"
	_, lpport, _ := net.SplitHostPort(cfg.LPUrls[0].Host)

	// in case of 'etcd --name=abc'
	exp := fmt.Sprintf("%s=%s://localhost:%s", cfg.Name, oldscheme, lpport)
	cfg.UpdateDefaultClusterFromName(defaultInitialCluster)
	if exp != cfg.InitialCluster {
		t.Fatalf("initial-cluster expected %q, got %q", exp, cfg.InitialCluster)
	}
	// advertise peer URL should not be affected
	if origpeer != cfg.APUrls[0].String() {
		t.Fatalf("advertise peer url expected %q, got %q", origadvc, cfg.APUrls[0].String())
	}
	// advertise client URL should not be affected
	if origadvc != cfg.ACUrls[0].String() {
		t.Fatalf("advertise client url expected %q, got %q", origadvc, cfg.ACUrls[0].String())
	}
}

// TestUpdateDefaultClusterFromNameOverwrite ensures that machine's default host is only used
// if advertise URLs are default values(localhost:2379,2380) AND if listen URL is 0.0.0.0.
func TestUpdateDefaultClusterFromNameOverwrite(t *testing.T) {
	if defaultHostname == "" {
		t.Skip("machine's default host not found")
	}

	cfg := NewConfig()
	defaultInitialCluster := cfg.InitialCluster
	oldscheme := cfg.APUrls[0].Scheme
	origadvc := cfg.ACUrls[0].String()

	cfg.Name = "abc"
	_, lpport, _ := net.SplitHostPort(cfg.LPUrls[0].Host)
	cfg.LPUrls[0] = url.URL{Scheme: cfg.LPUrls[0].Scheme, Host: fmt.Sprintf("0.0.0.0:%s", lpport)}
	dhost, _ := cfg.UpdateDefaultClusterFromName(defaultInitialCluster)
	if dhost != defaultHostname {
		t.Fatalf("expected default host %q, got %q", defaultHostname, dhost)
	}
	aphost, apport, _ := net.SplitHostPort(cfg.APUrls[0].Host)
	if apport != lpport {
		t.Fatalf("advertise peer url got different port %s, expected %s", apport, lpport)
	}
	if aphost != defaultHostname {
		t.Fatalf("advertise peer url expected machine default host %q, got %q", defaultHostname, aphost)
	}
	expected := fmt.Sprintf("%s=%s://%s:%s", cfg.Name, oldscheme, defaultHostname, lpport)
	if expected != cfg.InitialCluster {
		t.Fatalf("initial-cluster expected %q, got %q", expected, cfg.InitialCluster)
	}

	// advertise client URL should not be affected
	if origadvc != cfg.ACUrls[0].String() {
		t.Fatalf("advertise-client-url expected %q, got %q", origadvc, cfg.ACUrls[0].String())
	}
}

func (s *securityConfig) equals(t *transport.TLSInfo) bool {
	return s.CAFile == t.CAFile &&
		s.CertFile == t.CertFile &&
@@ -19,7 +19,7 @@ import (
	"fmt"
	"net"
	"net/http"
	"path"
	"path/filepath"

	"github.com/coreos/etcd/etcdserver"
	"github.com/coreos/etcd/etcdserver/api/v2http"
@@ -166,7 +166,7 @@ func startPeerListeners(cfg *Config) (plns []net.Listener, err error) {
	for i, u := range cfg.LPUrls {
		phosts[i] = u.Host
	}
	cfg.PeerTLSInfo, err = transport.SelfCert(path.Join(cfg.Dir, "fixtures/peer"), phosts)
	cfg.PeerTLSInfo, err = transport.SelfCert(filepath.Join(cfg.Dir, "fixtures", "peer"), phosts)
	if err != nil {
		plog.Fatalf("could not get certs (%v)", err)
	}
@@ -221,7 +221,7 @@ func startClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err error) {
	for i, u := range cfg.LCUrls {
		chosts[i] = u.Host
	}
	cfg.ClientTLSInfo, err = transport.SelfCert(path.Join(cfg.Dir, "fixtures/client"), chosts)
	cfg.ClientTLSInfo, err = transport.SelfCert(filepath.Join(cfg.Dir, "fixtures", "client"), chosts)
	if err != nil {
		plog.Fatalf("could not get certs (%v)", err)
	}
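This and the following path → filepath swaps matter on Windows: package path always joins with forward slashes, while path/filepath uses the host OS separator. A quick contrast:

import (
	"path"
	"path/filepath"
)

p1 := path.Join("data", "fixtures", "peer")     // "data/fixtures/peer" on every OS
p2 := filepath.Join("data", "fixtures", "peer") // `data\fixtures\peer` on Windows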
@@ -15,7 +15,7 @@
package embed

import (
	"path"
	"path/filepath"

	"github.com/coreos/etcd/wal"
)
@@ -23,7 +23,7 @@ import (
func isMemberInitialized(cfg *Config) bool {
	waldir := cfg.WalDir
	if waldir == "" {
		waldir = path.Join(cfg.Dir, "member", "wal")
		waldir = filepath.Join(cfg.Dir, "member", "wal")
	}

	return wal.Exist(waldir)
@@ -336,6 +336,6 @@ etcdctl is under the Apache 2.0 license. See the [LICENSE][license] file for det
[authentication]: ../Documentation/v2/authentication.md
[etcd]: https://github.com/coreos/etcd
[github-release]: https://github.com/coreos/etcd/releases/
[license]: https://github.com/coreos/etcdctl/blob/master/LICENSE
[license]: ../LICENSE
[semver]: http://semver.org/
[username-flag]: #--username--u
@@ -17,7 +17,7 @@ package command
import (
	"fmt"
	"log"
	"path"
	"path/filepath"
	"time"

	"github.com/coreos/etcd/etcdserver/etcdserverpb"
@@ -50,19 +50,19 @@ func handleBackup(c *cli.Context) error {
	var srcWAL string
	var destWAL string

	srcSnap := path.Join(c.String("data-dir"), "member", "snap")
	destSnap := path.Join(c.String("backup-dir"), "member", "snap")
	srcSnap := filepath.Join(c.String("data-dir"), "member", "snap")
	destSnap := filepath.Join(c.String("backup-dir"), "member", "snap")

	if c.String("wal-dir") != "" {
		srcWAL = c.String("wal-dir")
	} else {
		srcWAL = path.Join(c.String("data-dir"), "member", "wal")
		srcWAL = filepath.Join(c.String("data-dir"), "member", "wal")
	}

	if c.String("backup-wal-dir") != "" {
		destWAL = c.String("backup-wal-dir")
	} else {
		destWAL = path.Join(c.String("backup-dir"), "member", "wal")
		destWAL = filepath.Join(c.String("backup-dir"), "member", "wal")
	}

	if err := fileutil.CreateDirAll(destSnap); err != nil {
@@ -67,7 +67,7 @@ func leaseGrantCommandFunc(cmd *cobra.Command, args []string) {
	if err != nil {
		ExitWithError(ExitError, fmt.Errorf("failed to grant lease (%v)\n", err))
	}
	fmt.Printf("lease %016x granted with TTL(%ds)\n", resp.ID, resp.TTL)
	display.Grant(*resp)
}

// NewLeaseRevokeCommand returns the cobra command for "lease revoke".
@@ -90,12 +90,12 @@ func leaseRevokeCommandFunc(cmd *cobra.Command, args []string) {

	id := leaseFromArgs(args[0])
	ctx, cancel := commandCtx(cmd)
	_, err := mustClientFromCmd(cmd).Revoke(ctx, id)
	resp, err := mustClientFromCmd(cmd).Revoke(ctx, id)
	cancel()
	if err != nil {
		ExitWithError(ExitError, fmt.Errorf("failed to revoke lease (%v)\n", err))
	}
	fmt.Printf("lease %016x revoked\n", id)
	display.Revoke(id, *resp)
}

var timeToLiveKeys bool
@@ -154,9 +154,12 @@ func leaseKeepAliveCommandFunc(cmd *cobra.Command, args []string) {
	}

	for resp := range respc {
		fmt.Printf("lease %016x keepalived with TTL(%d)\n", resp.ID, resp.TTL)
		display.KeepAlive(*resp)
	}

	if _, ok := (display).(*simplePrinter); ok {
		fmt.Printf("lease %016x expired or revoked.\n", id)
	}
	fmt.Printf("lease %016x expired or revoked.\n", id)
}

func leaseFromArgs(arg string) v3.LeaseID {
@@ -125,18 +125,19 @@ func makeMirror(ctx context.Context, c *clientv3.Client, dc *clientv3.Client) er
		return rpctypes.ErrCompacted
	}

	var rev int64
	var lastRev int64
	ops := []clientv3.Op{}

	for _, ev := range wr.Events {
		nrev := ev.Kv.ModRevision
		if rev != 0 && nrev > rev {
		nextRev := ev.Kv.ModRevision
		if lastRev != 0 && nextRev > lastRev {
			_, err := dc.Txn(ctx).Then(ops...).Commit()
			if err != nil {
				return err
			}
			ops = []clientv3.Op{}
		}
		lastRev = nextRev
		switch ev.Type {
		case mvccpb.PUT:
			ops = append(ops, clientv3.OpPut(modifyPrefix(string(ev.Kv.Key)), string(ev.Kv.Value)))
@@ -107,7 +107,8 @@ func memberAddCommandFunc(cmd *cobra.Command, args []string) {

	urls := strings.Split(memberPeerURLs, ",")
	ctx, cancel := commandCtx(cmd)
	resp, err := mustClientFromCmd(cmd).MemberAdd(ctx, urls)
	cli := mustClientFromCmd(cmd)
	resp, err := cli.MemberAdd(ctx, urls)
	cancel()
	if err != nil {
		ExitWithError(ExitError, err)
@@ -118,12 +119,24 @@ func memberAddCommandFunc(cmd *cobra.Command, args []string) {

	if _, ok := (display).(*simplePrinter); ok {
		ctx, cancel = commandCtx(cmd)
		listResp, err := mustClientFromCmd(cmd).MemberList(ctx)
		cancel()

		if err != nil {
			ExitWithError(ExitError, err)
		listResp, err := cli.MemberList(ctx)
		// get latest member list; if there's failover new member might have outdated list
		for {
			if err != nil {
				ExitWithError(ExitError, err)
			}
			if listResp.Header.MemberId == resp.Header.MemberId {
				break
			}
			// quorum get to sync cluster list
			gresp, gerr := cli.Get(ctx, "_")
			if gerr != nil {
				ExitWithError(ExitError, err)
			}
			resp.Header.MemberId = gresp.Header.MemberId
			listResp, err = cli.MemberList(ctx)
		}
		cancel()

		conf := []string{}
		for _, memb := range listResp.Members {
@@ -21,7 +21,7 @@ import (
	"io"
	"os"
	"os/exec"
	"path"
	"path/filepath"
	"time"

	"github.com/coreos/etcd/client"
@@ -103,7 +103,7 @@ func prepareBackend() backend.Backend {
	var be backend.Backend

	bch := make(chan struct{})
	dbpath := path.Join(migrateDatadir, "member", "snap", "db")
	dbpath := filepath.Join(migrateDatadir, "member", "snap", "db")
	go func() {
		defer close(bch)
		be = backend.New(dbpath, time.Second, 10000)
@@ -130,9 +130,9 @@ func rebuildStoreV2() (store.Store, uint64) {

	waldir := migrateWALdir
	if len(waldir) == 0 {
		waldir = path.Join(migrateDatadir, "member", "wal")
		waldir = filepath.Join(migrateDatadir, "member", "wal")
	}
	snapdir := path.Join(migrateDatadir, "member", "snap")
	snapdir := filepath.Join(migrateDatadir, "member", "snap")

	ss := snap.New(snapdir)
	snapshot, err := ss.Load()
@@ -32,6 +32,9 @@ type printer interface {
	Txn(v3.TxnResponse)
	Watch(v3.WatchResponse)

	Grant(r v3.LeaseGrantResponse)
	Revoke(id v3.LeaseID, r v3.LeaseRevokeResponse)
	KeepAlive(r v3.LeaseKeepAliveResponse)
	TimeToLive(r v3.LeaseTimeToLiveResponse, keys bool)

	MemberAdd(v3.MemberAddResponse)
@@ -81,13 +84,18 @@ type printerRPC struct {
	p func(interface{})
}

func (p *printerRPC) Del(r v3.DeleteResponse) { p.p((*pb.DeleteRangeResponse)(&r)) }
func (p *printerRPC) Get(r v3.GetResponse)    { p.p((*pb.RangeResponse)(&r)) }
func (p *printerRPC) Put(r v3.PutResponse)    { p.p((*pb.PutResponse)(&r)) }
func (p *printerRPC) Txn(r v3.TxnResponse)    { p.p((*pb.TxnResponse)(&r)) }
func (p *printerRPC) Watch(r v3.WatchResponse) { p.p(&r) }
func (p *printerRPC) Del(r v3.DeleteResponse)  { p.p((*pb.DeleteRangeResponse)(&r)) }
func (p *printerRPC) Get(r v3.GetResponse)     { p.p((*pb.RangeResponse)(&r)) }
func (p *printerRPC) Put(r v3.PutResponse)     { p.p((*pb.PutResponse)(&r)) }
func (p *printerRPC) Txn(r v3.TxnResponse)     { p.p((*pb.TxnResponse)(&r)) }
func (p *printerRPC) Watch(r v3.WatchResponse) { p.p(&r) }

func (p *printerRPC) Grant(r v3.LeaseGrantResponse)                      { p.p(r) }
func (p *printerRPC) Revoke(id v3.LeaseID, r v3.LeaseRevokeResponse)     { p.p(r) }
func (p *printerRPC) KeepAlive(r v3.LeaseKeepAliveResponse)              { p.p(r) }
func (p *printerRPC) TimeToLive(r v3.LeaseTimeToLiveResponse, keys bool) { p.p(&r) }
func (p *printerRPC) MemberAdd(r v3.MemberAddResponse) { p.p((*pb.MemberAddResponse)(&r)) }

func (p *printerRPC) MemberAdd(r v3.MemberAddResponse) { p.p((*pb.MemberAddResponse)(&r)) }
func (p *printerRPC) MemberRemove(id uint64, r v3.MemberRemoveResponse) {
	p.p((*pb.MemberRemoveResponse)(&r))
}
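Routing lease output through the printer interface means each --write-out format implements these methods once instead of the lease commands calling fmt.Printf directly. A hypothetical extra implementation, embedding an existing printer to inherit the remaining methods:

// quietPrinter only reports lease TTLs; everything else falls through to
// the embedded printer. Purely illustrative, not part of this change.
type quietPrinter struct{ printer }

func (q *quietPrinter) Grant(r v3.LeaseGrantResponse)         { fmt.Println(r.TTL) }
func (q *quietPrinter) KeepAlive(r v3.LeaseKeepAliveResponse) { fmt.Println(r.TTL) }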
@@ -30,7 +30,7 @@ func (p *fieldsPrinter) kv(pfx string, kv *spb.KeyValue) {
	fmt.Printf("\"%sModRevision\" : %d\n", pfx, kv.ModRevision)
	fmt.Printf("\"%sVersion\" : %d\n", pfx, kv.Version)
	fmt.Printf("\"%sValue\" : %q\n", pfx, string(kv.Value))
	fmt.Printf("\"%sLease\" : %d\n", pfx, string(kv.Lease))
	fmt.Printf("\"%sLease\" : %d\n", pfx, kv.Lease)
}

func (p *fieldsPrinter) hdr(h *pb.ResponseHeader) {
@@ -92,6 +92,22 @@ func (p *fieldsPrinter) Watch(resp v3.WatchResponse) {
	}
}

func (p *fieldsPrinter) Grant(r v3.LeaseGrantResponse) {
	p.hdr(r.ResponseHeader)
	fmt.Println(`"ID" :`, r.ID)
	fmt.Println(`"TTL" :`, r.TTL)
}

func (p *fieldsPrinter) Revoke(id v3.LeaseID, r v3.LeaseRevokeResponse) {
	p.hdr(r.Header)
}

func (p *fieldsPrinter) KeepAlive(r v3.LeaseKeepAliveResponse) {
	p.hdr(r.ResponseHeader)
	fmt.Println(`"ID" :`, r.ID)
	fmt.Println(`"TTL" :`, r.TTL)
}

func (p *fieldsPrinter) TimeToLive(r v3.LeaseTimeToLiveResponse, keys bool) {
	p.hdr(r.ResponseHeader)
	fmt.Println(`"ID" :`, r.ID)
@@ -79,6 +79,18 @@ func (s *simplePrinter) Watch(resp v3.WatchResponse) {
	}
}

+func (s *simplePrinter) Grant(resp v3.LeaseGrantResponse) {
+	fmt.Printf("lease %016x granted with TTL(%ds)\n", resp.ID, resp.TTL)
+}
+
+func (p *simplePrinter) Revoke(id v3.LeaseID, r v3.LeaseRevokeResponse) {
+	fmt.Printf("lease %016x revoked\n", id)
+}
+
+func (p *simplePrinter) KeepAlive(resp v3.LeaseKeepAliveResponse) {
+	fmt.Printf("lease %016x keepalived with TTL(%d)\n", resp.ID, resp.TTL)
+}
+
func (s *simplePrinter) TimeToLive(resp v3.LeaseTimeToLiveResponse, keys bool) {
	txt := fmt.Sprintf("lease %016x granted with TTL(%ds), remaining(%ds)", resp.ID, resp.GrantedTTL, resp.TTL)
	if keys {
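Note: the simple printer's lease messages rely on the %016x verb, which zero-pads every lease ID to a fixed 16-digit hex width so IDs of different magnitudes line up in etcdctl output. A minimal standalone sketch of that formatting (the lease IDs and TTL below are made up, not from the PR):

package main

import "fmt"

func main() {
	// %016x pads to 16 hex digits: a small ID and a large one
	// render at the same width in "lease ... granted" lines.
	for _, id := range []int64{0x1, 0x694d77aa9e38260f} { // hypothetical lease IDs
		fmt.Printf("lease %016x granted with TTL(%ds)\n", id, 60)
	}
}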
@@ -23,11 +23,11 @@ import (
	"io"
	"math"
	"os"
-	"path"
+	"path/filepath"
	"reflect"
	"strings"

-	"github.com/boltdb/bolt"
+	bolt "github.com/coreos/bbolt"
	"github.com/coreos/etcd/etcdserver"
	"github.com/coreos/etcd/etcdserver/etcdserverpb"
	"github.com/coreos/etcd/etcdserver/membership"

@@ -186,8 +186,8 @@ func snapshotRestoreCommandFunc(cmd *cobra.Command, args []string) {
		basedir = restoreName + ".etcd"
	}

-	waldir := path.Join(basedir, "member", "wal")
-	snapdir := path.Join(basedir, "member", "snap")
+	waldir := filepath.Join(basedir, "member", "wal")
+	snapdir := filepath.Join(basedir, "member", "snap")

	if _, err := os.Stat(basedir); err == nil {
		ExitWithError(ExitInvalidInput, fmt.Errorf("data-dir %q exists", basedir))

@@ -325,7 +325,7 @@ func makeDB(snapdir, dbfile string, commit int) {
		ExitWithError(ExitIO, err)
	}

-	dbpath := path.Join(snapdir, "db")
+	dbpath := filepath.Join(snapdir, "db")
	db, dberr := os.OpenFile(dbpath, os.O_RDWR|os.O_CREATE, 0600)
	if dberr != nil {
		ExitWithError(ExitIO, dberr)
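Note: the path to path/filepath swap repeated throughout this diff is a portability fix: path.Join always joins with forward slashes, while filepath.Join uses the host OS separator. A small illustrative program (not part of the PR):

package main

import (
	"fmt"
	"path"
	"path/filepath"
)

func main() {
	// path.Join targets slash-separated names such as URL paths;
	// it prints "member/wal" on every platform.
	fmt.Println(path.Join("member", "wal"))
	// filepath.Join respects the platform separator, printing
	// "member\wal" on Windows, which on-disk locations like the
	// WAL and snapshot directories need.
	fmt.Println(filepath.Join("member", "wal"))
}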
@@ -45,7 +45,7 @@ var (
func init() {
	rootCmd.PersistentFlags().StringSliceVar(&globalFlags.Endpoints, "endpoints", []string{"127.0.0.1:2379"}, "gRPC endpoints")

-	rootCmd.PersistentFlags().StringVarP(&globalFlags.OutputFormat, "write-out", "w", "simple", "set the output format (fields, json, proto, simple, table)")
+	rootCmd.PersistentFlags().StringVarP(&globalFlags.OutputFormat, "write-out", "w", "simple", "set the output format (fields, json, protobuf, simple, table)")
	rootCmd.PersistentFlags().BoolVar(&globalFlags.IsHex, "hex", false, "print byte strings as hex encoded strings")

	rootCmd.PersistentFlags().DurationVar(&globalFlags.DialTimeout, "dial-timeout", defaultDialTimeout, "dial timeout for client connections")
@@ -22,7 +22,7 @@ import (
	"net"
	"net/http"
	"os"
-	"path"
+	"path/filepath"
	"reflect"
	"runtime"
	"strings"

@@ -39,8 +39,6 @@ import (
	"github.com/coreos/etcd/pkg/types"
	"github.com/coreos/etcd/proxy/httpproxy"
	"github.com/coreos/etcd/version"
-	"github.com/coreos/go-systemd/daemon"
-	systemdutil "github.com/coreos/go-systemd/util"
	"github.com/coreos/pkg/capnslog"
	"github.com/grpc-ecosystem/go-grpc-prometheus"
	"github.com/prometheus/client_golang/prometheus"
@@ -85,7 +83,13 @@ func startEtcdOrProxyV2() {
	GoMaxProcs := runtime.GOMAXPROCS(0)
	plog.Infof("setting maximum number of CPUs to %d, total number of available CPUs is %d", GoMaxProcs, runtime.NumCPU())

-	(&cfg.Config).UpdateDefaultClusterFromName(defaultInitialCluster)
+	defaultHost, dhErr := (&cfg.Config).UpdateDefaultClusterFromName(defaultInitialCluster)
+	if defaultHost != "" {
+		plog.Infof("advertising using detected default host %q", defaultHost)
+	}
+	if dhErr != nil {
+		plog.Noticef("failed to detect default host (%v)", dhErr)
+	}

	if cfg.Dir == "" {
		cfg.Dir = fmt.Sprintf("%v.etcd", cfg.Name)
@@ -157,20 +161,12 @@ func startEtcdOrProxyV2() {

	osutil.HandleInterrupts()

-	if systemdutil.IsRunningSystemd() {
-		// At this point, the initialization of etcd is done.
-		// The listeners are listening on the TCP ports and ready
-		// for accepting connections. The etcd instance should be
-		// joined with the cluster and ready to serve incoming
-		// connections.
-		sent, err := daemon.SdNotify(false, "READY=1")
-		if err != nil {
-			plog.Errorf("failed to notify systemd for readiness: %v", err)
-		}
-		if !sent {
-			plog.Errorf("forgot to set Type=notify in systemd service file?")
-		}
-	}
+	// At this point, the initialization of etcd is done.
+	// The listeners are listening on the TCP ports and ready
+	// for accepting connections. The etcd instance should be
+	// joined with the cluster and ready to serve incoming
+	// connections.
+	notifySystemd()

	select {
	case lerr := <-errc:
@@ -184,15 +180,6 @@ func startEtcdOrProxyV2() {

// startEtcd runs StartEtcd in addition to hooks needed for standalone etcd.
func startEtcd(cfg *embed.Config) (<-chan struct{}, <-chan error, error) {
-	defaultHost, dhErr := cfg.IsDefaultHost()
-	if defaultHost != "" {
-		if dhErr == nil {
-			plog.Infof("advertising using detected default host %q", defaultHost)
-		} else {
-			plog.Noticef("failed to detect default host, advertise falling back to %q (%v)", defaultHost, dhErr)
-		}
-	}
-
	if cfg.Metrics == "extensive" {
		grpc_prometheus.EnableHandlingTimeHistogram()
	}
@@ -202,7 +189,10 @@ func startEtcd(cfg *embed.Config) (<-chan struct{}, <-chan error, error) {
		return nil, nil, err
	}
	osutil.RegisterInterruptHandler(e.Server.Stop)
-	<-e.Server.ReadyNotify() // wait for e.Server to join the cluster
+	select {
+	case <-e.Server.ReadyNotify(): // wait for e.Server to join the cluster
+	case <-e.Server.StopNotify(): // publish aborted from 'ErrStopped'
+	}
	return e.Server.StopNotify(), e.Err(), nil
}
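Note: waiting only on ReadyNotify could block forever when the server stops before finishing its cluster publish; selecting on StopNotify as well lets startup fail fast instead. A minimal sketch of the same guard for embedded-etcd users, assuming the github.com/coreos/etcd/embed package with its default config:

package main

import (
	"log"

	"github.com/coreos/etcd/embed"
)

func main() {
	e, err := embed.StartEtcd(embed.NewConfig())
	if err != nil {
		log.Fatal(err)
	}
	defer e.Close()

	select {
	case <-e.Server.ReadyNotify():
		log.Print("server is ready") // joined the cluster
	case <-e.Server.StopNotify():
		log.Fatal("server stopped before becoming ready") // publish aborted
	}
	log.Fatal(<-e.Err()) // block until the server reports an error
}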
@@ -221,14 +211,14 @@ func startProxy(cfg *config) error {
		return err
	}

-	cfg.Dir = path.Join(cfg.Dir, "proxy")
+	cfg.Dir = filepath.Join(cfg.Dir, "proxy")
	err = os.MkdirAll(cfg.Dir, fileutil.PrivateDirMode)
	if err != nil {
		return err
	}

	var peerURLs []string
-	clusterfile := path.Join(cfg.Dir, "cluster")
+	clusterfile := filepath.Join(cfg.Dir, "cluster")

	b, err := ioutil.ReadFile(clusterfile)
	switch {
@@ -17,12 +17,14 @@ package etcdmain

import (
	"fmt"
	"net"
+	"net/url"
	"os"
	"time"

	"github.com/coreos/etcd/client"
	"github.com/coreos/etcd/pkg/transport"
	"github.com/coreos/etcd/proxy/tcpproxy"

	"github.com/spf13/cobra"
)
@@ -77,6 +79,20 @@ func newGatewayStartCommand() *cobra.Command {
	return &cmd
}

+func stripSchema(eps []string) []string {
+	var endpoints []string
+
+	for _, ep := range eps {
+
+		if u, err := url.Parse(ep); err == nil && u.Host != "" {
+			ep = u.Host
+		}
+
+		endpoints = append(endpoints, ep)
+	}
+
+	return endpoints
+}
+
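Note: stripSchema leans on url.Parse semantics: a fully qualified endpoint such as https://10.0.0.1:2379 parses with a non-empty Host, while a bare host:port either parses with an empty Host or fails outright, so it is passed through unchanged. A small standalone illustration (the endpoints below are made up):

package main

import (
	"fmt"
	"net/url"
)

func main() {
	eps := []string{
		"https://10.0.0.1:2379", // scheme present: u.Host is "10.0.0.1:2379"
		"localhost:2379",        // parses as scheme "localhost" with empty Host; kept as-is
		"10.0.0.2:2379",         // parse error (colon in first path segment); kept as-is
	}
	for _, ep := range eps {
		if u, err := url.Parse(ep); err == nil && u.Host != "" {
			ep = u.Host
		}
		fmt.Println(ep) // 10.0.0.1:2379, localhost:2379, 10.0.0.2:2379
	}
}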
func startGateway(cmd *cobra.Command, args []string) {
	endpoints := gatewayEndpoints
	if gatewayDNSCluster != "" {

@@ -101,6 +117,9 @@ func startGateway(cmd *cobra.Command, args []string) {
		}
	}

+	// Strip the schema from the endpoints because we start just a TCP proxy
+	endpoints = stripSchema(endpoints)
+
	if len(endpoints) == 0 {
		plog.Fatalf("no endpoints found")
	}

@@ -117,5 +136,8 @@ func startGateway(cmd *cobra.Command, args []string) {
		MonitorInterval: getewayRetryDelay,
	}

+	// At this point, etcd gateway listener is initialized
+	notifySystemd()
+
	tp.Run()
}
@@ -144,6 +144,9 @@ func startGRPCProxy(cmd *cobra.Command, args []string) {

	go func() { errc <- m.Serve() }()

+	// grpc-proxy is initialized, ready to serve
+	notifySystemd()
+
	fmt.Fprintln(os.Stderr, <-errc)
	os.Exit(1)
}
@@ -17,6 +17,9 @@ package etcdmain

import (
	"fmt"
	"os"
+
+	"github.com/coreos/go-systemd/daemon"
+	systemdutil "github.com/coreos/go-systemd/util"
)

func Main() {

@@ -35,3 +38,16 @@ func Main() {

	startEtcdOrProxyV2()
}
+
+func notifySystemd() {
+	if !systemdutil.IsRunningSystemd() {
+		return
+	}
+	sent, err := daemon.SdNotify(false, "READY=1")
+	if err != nil {
+		plog.Errorf("failed to notify systemd for readiness: %v", err)
+	}
+	if !sent {
+		plog.Errorf("forgot to set Type=notify in systemd service file?")
+	}
+}
@@ -185,9 +185,5 @@ func (ams *authMaintenanceServer) Hash(ctx context.Context, r *pb.HashRequest) (
}

func (ams *authMaintenanceServer) Status(ctx context.Context, ar *pb.StatusRequest) (*pb.StatusResponse, error) {
-	if err := ams.isAuthenticated(ctx); err != nil {
-		return nil, err
-	}
-
	return ams.maintenanceServer.Status(ctx, ar)
}
@@ -520,15 +520,14 @@ func (a *applierV3backend) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantR
	if err == nil {
		resp.ID = int64(l.ID)
		resp.TTL = l.TTL()
-		resp.Header = &pb.ResponseHeader{Revision: a.s.KV().Rev()}
+		resp.Header = newHeader(a.s)
	}

	return resp, err
}

func (a *applierV3backend) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
	err := a.s.lessor.Revoke(lease.LeaseID(lc.ID))
-	return &pb.LeaseRevokeResponse{Header: &pb.ResponseHeader{Revision: a.s.KV().Rev()}}, err
+	return &pb.LeaseRevokeResponse{Header: newHeader(a.s)}, err
}

func (a *applierV3backend) Alarm(ar *pb.AlarmRequest) (*pb.AlarmResponse, error) {
@@ -609,69 +608,125 @@ func (a *applierV3backend) AuthEnable() (*pb.AuthEnableResponse, error) {
	if err != nil {
		return nil, err
	}
-	return &pb.AuthEnableResponse{}, nil
+	return &pb.AuthEnableResponse{Header: newHeader(a.s)}, nil
}

func (a *applierV3backend) AuthDisable() (*pb.AuthDisableResponse, error) {
	a.s.AuthStore().AuthDisable()
-	return &pb.AuthDisableResponse{}, nil
+	return &pb.AuthDisableResponse{Header: newHeader(a.s)}, nil
}

func (a *applierV3backend) Authenticate(r *pb.InternalAuthenticateRequest) (*pb.AuthenticateResponse, error) {
-	ctx := context.WithValue(context.WithValue(context.TODO(), "index", a.s.consistIndex.ConsistentIndex()), "simpleToken", r.SimpleToken)
-	return a.s.AuthStore().Authenticate(ctx, r.Name, r.Password)
+	ctx := context.WithValue(context.WithValue(context.Background(), "index", a.s.consistIndex.ConsistentIndex()), "simpleToken", r.SimpleToken)
+	resp, err := a.s.AuthStore().Authenticate(ctx, r.Name, r.Password)
+	if resp != nil {
+		resp.Header = newHeader(a.s)
+	}
+	return resp, err
}

func (a *applierV3backend) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) {
-	return a.s.AuthStore().UserAdd(r)
+	resp, err := a.s.AuthStore().UserAdd(r)
+	if resp != nil {
+		resp.Header = newHeader(a.s)
+	}
+	return resp, err
}

func (a *applierV3backend) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) {
-	return a.s.AuthStore().UserDelete(r)
+	resp, err := a.s.AuthStore().UserDelete(r)
+	if resp != nil {
+		resp.Header = newHeader(a.s)
+	}
+	return resp, err
}

func (a *applierV3backend) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) {
-	return a.s.AuthStore().UserChangePassword(r)
+	resp, err := a.s.AuthStore().UserChangePassword(r)
+	if resp != nil {
+		resp.Header = newHeader(a.s)
+	}
+	return resp, err
}

func (a *applierV3backend) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) {
-	return a.s.AuthStore().UserGrantRole(r)
+	resp, err := a.s.AuthStore().UserGrantRole(r)
+	if resp != nil {
+		resp.Header = newHeader(a.s)
+	}
+	return resp, err
}

func (a *applierV3backend) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
-	return a.s.AuthStore().UserGet(r)
+	resp, err := a.s.AuthStore().UserGet(r)
+	if resp != nil {
+		resp.Header = newHeader(a.s)
+	}
+	return resp, err
}

func (a *applierV3backend) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) {
-	return a.s.AuthStore().UserRevokeRole(r)
+	resp, err := a.s.AuthStore().UserRevokeRole(r)
+	if resp != nil {
+		resp.Header = newHeader(a.s)
+	}
+	return resp, err
}

func (a *applierV3backend) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) {
-	return a.s.AuthStore().RoleAdd(r)
+	resp, err := a.s.AuthStore().RoleAdd(r)
+	if resp != nil {
+		resp.Header = newHeader(a.s)
+	}
+	return resp, err
}

func (a *applierV3backend) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) {
-	return a.s.AuthStore().RoleGrantPermission(r)
+	resp, err := a.s.AuthStore().RoleGrantPermission(r)
+	if resp != nil {
+		resp.Header = newHeader(a.s)
+	}
+	return resp, err
}

func (a *applierV3backend) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) {
-	return a.s.AuthStore().RoleGet(r)
+	resp, err := a.s.AuthStore().RoleGet(r)
+	if resp != nil {
+		resp.Header = newHeader(a.s)
+	}
+	return resp, err
}

func (a *applierV3backend) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) {
-	return a.s.AuthStore().RoleRevokePermission(r)
+	resp, err := a.s.AuthStore().RoleRevokePermission(r)
+	if resp != nil {
+		resp.Header = newHeader(a.s)
+	}
+	return resp, err
}

func (a *applierV3backend) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) {
-	return a.s.AuthStore().RoleDelete(r)
+	resp, err := a.s.AuthStore().RoleDelete(r)
+	if resp != nil {
+		resp.Header = newHeader(a.s)
+	}
+	return resp, err
}

func (a *applierV3backend) UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) {
-	return a.s.AuthStore().UserList(r)
+	resp, err := a.s.AuthStore().UserList(r)
+	if resp != nil {
+		resp.Header = newHeader(a.s)
+	}
+	return resp, err
}

func (a *applierV3backend) RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) {
-	return a.s.AuthStore().RoleList(r)
+	resp, err := a.s.AuthStore().RoleList(r)
+	if resp != nil {
+		resp.Header = newHeader(a.s)
+	}
+	return resp, err
}

type quotaApplierV3 struct {
@@ -836,3 +891,12 @@ func pruneKVs(rr *mvcc.RangeResult, isPrunable func(*mvccpb.KeyValue) bool) {
	}
	rr.KVs = rr.KVs[:j]
}
+
+func newHeader(s *EtcdServer) *pb.ResponseHeader {
+	return &pb.ResponseHeader{
+		ClusterId: uint64(s.Cluster().ID()),
+		MemberId:  uint64(s.ID()),
+		Revision:  s.KV().Rev(),
+		RaftTerm:  s.Term(),
+	}
+}
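Note: with newHeader, auth and lease responses now carry full cluster metadata (cluster ID, member ID, store revision, raft term) instead of an empty or revision-only header. A hypothetical client-side sketch, assuming the github.com/coreos/etcd/clientv3 package and a member listening on 127.0.0.1:2379:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	resp, err := cli.UserList(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	// Before this change the auth response header was left empty.
	fmt.Printf("cluster=%x member=%x rev=%d term=%d\n",
		resp.Header.ClusterId, resp.Header.MemberId,
		resp.Header.Revision, resp.Header.RaftTerm)
}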
@@ -16,7 +16,7 @@ package etcdserver

import (
	"fmt"
-	"path"
+	"path/filepath"
	"sort"
	"strings"
	"time"

@@ -118,16 +118,16 @@ func (c *ServerConfig) advertiseMatchesCluster() error {
	return nil
}

-func (c *ServerConfig) MemberDir() string { return path.Join(c.DataDir, "member") }
+func (c *ServerConfig) MemberDir() string { return filepath.Join(c.DataDir, "member") }

func (c *ServerConfig) WALDir() string {
	if c.DedicatedWALDir != "" {
		return c.DedicatedWALDir
	}
-	return path.Join(c.MemberDir(), "wal")
+	return filepath.Join(c.MemberDir(), "wal")
}

-func (c *ServerConfig) SnapDir() string { return path.Join(c.MemberDir(), "snap") }
+func (c *ServerConfig) SnapDir() string { return filepath.Join(c.MemberDir(), "snap") }

func (c *ServerConfig) ShouldDiscover() bool { return c.DiscoveryURL != "" }
@@ -23,6 +23,7 @@ import (
	"net/http"
	"os"
	"path"
+	"path/filepath"
	"regexp"
	"sync"
	"sync/atomic"

@@ -263,7 +264,7 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) {
	}
	ss := snap.New(cfg.SnapDir())

-	bepath := path.Join(cfg.SnapDir(), databaseFilename)
+	bepath := filepath.Join(cfg.SnapDir(), databaseFilename)
	beExist := fileutil.Exist(bepath)

	var be backend.Backend
@@ -594,6 +595,7 @@ func (s *EtcdServer) ReportSnapshot(id uint64, status raft.SnapshotStatus) {
type etcdProgress struct {
	confState raftpb.ConfState
	snapi     uint64
+	appliedt  uint64
	appliedi  uint64
}
@@ -606,7 +608,7 @@ type raftReadyHandler struct {
}

func (s *EtcdServer) run() {
-	snap, err := s.r.raftStorage.Snapshot()
+	snapshot, err := s.r.raftStorage.Snapshot()
	if err != nil {
		plog.Panicf("get snapshot from raft storage error: %v", err)
	}

@@ -664,9 +666,10 @@ func (s *EtcdServer) run() {
	// asynchronously accept apply packets, dispatch progress in-order
	sched := schedule.NewFIFOScheduler()
	ep := etcdProgress{
-		confState: snap.Metadata.ConfState,
-		snapi:     snap.Metadata.Index,
-		appliedi:  snap.Metadata.Index,
+		confState: snapshot.Metadata.ConfState,
+		snapi:     snapshot.Metadata.Index,
+		appliedt:  snapshot.Metadata.Term,
+		appliedi:  snapshot.Metadata.Index,
	}

	defer func() {
@@ -765,7 +768,7 @@ func (s *EtcdServer) applyAll(ep *etcdProgress, apply *apply) {
	select {
	// snapshot requested via send()
	case m := <-s.r.msgSnapC:
-		merged := s.createMergedSnapshotMessage(m, ep.appliedi, ep.confState)
+		merged := s.createMergedSnapshotMessage(m, ep.appliedt, ep.appliedi, ep.confState)
		s.sendMergedSnap(merged)
	default:
	}
@@ -789,7 +792,7 @@ func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) {
		plog.Panicf("get database snapshot file path error: %v", err)
	}

-	fn := path.Join(s.Cfg.SnapDir(), databaseFilename)
+	fn := filepath.Join(s.Cfg.SnapDir(), databaseFilename)
	if err := os.Rename(snapfn, fn); err != nil {
		plog.Panicf("rename snapshot file error: %v", err)
	}

@@ -867,6 +870,7 @@ func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) {
	}
	plog.Info("finished adding peers from new cluster configuration into network...")

+	ep.appliedt = apply.snapshot.Metadata.Term
	ep.appliedi = apply.snapshot.Metadata.Index
	ep.snapi = ep.appliedi
	ep.confState = apply.snapshot.Metadata.ConfState
@@ -888,7 +892,7 @@ func (s *EtcdServer) applyEntries(ep *etcdProgress, apply *apply) {
		return
	}
	var shouldstop bool
-	if ep.appliedi, shouldstop = s.apply(ents, &ep.confState); shouldstop {
+	if ep.appliedt, ep.appliedi, shouldstop = s.apply(ents, &ep.confState); shouldstop {
		go s.stopWithDelay(10*100*time.Millisecond, fmt.Errorf("the member has been permanently removed from the cluster"))
	}
}
@@ -1242,28 +1246,32 @@ func (s *EtcdServer) sendMergedSnap(merged snap.Message) {
// apply takes entries received from Raft (after it has been committed) and
// applies them to the current state of the EtcdServer.
// The given entries should not be empty.
-func (s *EtcdServer) apply(es []raftpb.Entry, confState *raftpb.ConfState) (uint64, bool) {
-	var applied uint64
-	var shouldstop bool
+func (s *EtcdServer) apply(es []raftpb.Entry, confState *raftpb.ConfState) (appliedt uint64, appliedi uint64, shouldStop bool) {
	for i := range es {
		e := es[i]
		switch e.Type {
		case raftpb.EntryNormal:
			s.applyEntryNormal(&e)
		case raftpb.EntryConfChange:
+			// set the consistent index of current executing entry
+			if e.Index > s.consistIndex.ConsistentIndex() {
+				s.consistIndex.setConsistentIndex(e.Index)
+			}
			var cc raftpb.ConfChange
			pbutil.MustUnmarshal(&cc, e.Data)
			removedSelf, err := s.applyConfChange(cc, confState)
-			shouldstop = shouldstop || removedSelf
+			s.setAppliedIndex(e.Index)
+			shouldStop = shouldStop || removedSelf
			s.w.Trigger(cc.ID, err)
		default:
			plog.Panicf("entry type should be either EntryNormal or EntryConfChange")
		}
		atomic.StoreUint64(&s.r.index, e.Index)
		atomic.StoreUint64(&s.r.term, e.Term)
-		applied = e.Index
+		appliedt = e.Term
+		appliedi = e.Index
	}
-	return applied, shouldstop
+	return appliedt, appliedi, shouldStop
}

// applyEntryNormal apples an EntryNormal type raftpb request to the EtcdServer
@@ -613,7 +613,7 @@ func TestApplyMultiConfChangeShouldStop(t *testing.T) {
		ents = append(ents, ent)
	}

-	_, shouldStop := srv.apply(ents, &raftpb.ConfState{})
+	_, _, shouldStop := srv.apply(ents, &raftpb.ConfState{})
	if !shouldStop {
		t.Errorf("shouldStop = %t, want %t", shouldStop, true)
	}
@@ -16,7 +16,6 @@ package etcdserver

import (
	"io"
-	"log"

	"github.com/coreos/etcd/mvcc/backend"
	"github.com/coreos/etcd/raft/raftpb"

@@ -26,12 +25,7 @@ import (
// createMergedSnapshotMessage creates a snapshot message that contains: raft status (term, conf),
// a snapshot of v2 store inside raft.Snapshot as []byte, a snapshot of v3 KV in the top level message
// as ReadCloser.
-func (s *EtcdServer) createMergedSnapshotMessage(m raftpb.Message, snapi uint64, confState raftpb.ConfState) snap.Message {
-	snapt, err := s.r.raftStorage.Term(snapi)
-	if err != nil {
-		log.Panicf("get term should never fail: %v", err)
-	}
-
+func (s *EtcdServer) createMergedSnapshotMessage(m raftpb.Message, snapt, snapi uint64, confState raftpb.ConfState) snap.Message {
	// get a snapshot of v2 store as []byte
	clone := s.store.Clone()
	d, err := clone.SaveNoCopy()
10 glide.lock generated

@@ -1,5 +1,5 @@
-hash: ca3c895fa60c9ca9f53408202fb7643705f9960212d342967ed0da8e93606cc4
-updated: 2017-01-18T10:26:48.990115455-08:00
+hash: 8a2307db167c2f9bc511956fbec0a4400bc38db103603fc6a87bb179ac853bf5
+updated: 2017-11-21T11:36:36.822426751-08:00
 imports:
 - name: github.com/beorn7/perks
   version: 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9

@@ -7,10 +7,10 @@ imports:
   - quantile
 - name: github.com/bgentry/speakeasy
   version: 36e9cfdd690967f4f690c6edcc9ffacd006014a0
-- name: github.com/boltdb/bolt
-  version: 583e8937c61f1af6513608ccc75c97b6abdf4ff9
 - name: github.com/cockroachdb/cmux
   version: 112f0506e7743d64a6eb8fedbcff13d9979bbf92
+- name: github.com/coreos/bbolt
+  version: 32c383e75ce054674c53b5a07e55de85332aee14
 - name: github.com/coreos/go-semver
   version: 568e959cd89871e61434c1143528d9162da89ef2
   subpackages:

@@ -74,7 +74,7 @@ imports:
   subpackages:
   - prometheus
 - name: github.com/prometheus/client_model
-  version: fa8ad6fec33561be4280a8f0514318c79d7f6cb6
+  version: 6f3806018612930941127f2a7c6c453ba2c527d2
   subpackages:
   - go
 - name: github.com/prometheus/common
38 glide.yaml

@@ -2,8 +2,8 @@ package: github.com/coreos/etcd
 import:
 - package: github.com/bgentry/speakeasy
   version: 36e9cfdd690967f4f690c6edcc9ffacd006014a0
-- package: github.com/boltdb/bolt
-  version: v1.3.0
+- package: github.com/coreos/bbolt
+  version: v1.3.1-coreos.5
 - package: github.com/cockroachdb/cmux
   version: 112f0506e7743d64a6eb8fedbcff13d9979bbf92
 - package: github.com/coreos/go-semver

@@ -48,9 +48,21 @@ import:
 - package: github.com/olekukonko/tablewriter
   version: cca8bbc0798408af109aaaa239cbd2634846b340
 - package: github.com/prometheus/client_golang
-  version: v0.8.0
+  version: c5b7fccd204277076155f10851dad72b76a49317
   subpackages:
   - prometheus
+- package: github.com/prometheus/client_model
+  version: 6f3806018612930941127f2a7c6c453ba2c527d2
+  subpackages:
+  - go
+- package: github.com/prometheus/common
+  version: 195bde7883f7c39ea62b0d92ab7359b5327065cb
+  subpackages:
+  - expfmt
+  - internal/bitbucket.org/ww/goautoneg
+  - model
+- package: github.com/prometheus/procfs
+  version: fcdb11ccb4389efb1b210b7ffb623ab71c5fdd60
 - package: github.com/spf13/cobra
   version: 1c44ec8d3f1552cac48999f9306da23c4d8a288b
 - package: github.com/spf13/pflag

@@ -102,4 +114,22 @@ import:
   subpackages:
   - assert
 - package: github.com/karlseguin/ccache
-  version: v2.0.2
+  version: a2d62155777b39595c825ed3824279e642a5db3c
+- package: github.com/beorn7/perks
+  version: 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
+- package: github.com/cpuguy83/go-md2man
+  version: a65d4d2de4d5f7c74868dfa9b202a3c8be315aaa
+- package: github.com/matttproud/golang_protobuf_extensions
+  version: c12348ce28de40eed0136aa2b644d0ee0650e56c
+  subpackages:
+  - pbutil
+- package: github.com/russross/blackfriday
+  version: 5f33e7b7878355cd2b7e6b8eefc48a5472c69f70
+- package: github.com/mattn/go-runewidth
+  version: 737072b4e32b7a5018b4a7125da8d12de90e8045
+- package: github.com/shurcooL/sanitized_anchor_name
+  version: 1dba4b3954bc059efc3991ec364f9f9a35f597d2
+- package: golang.org/x/sys
+  version: 478fcf54317e52ab69f40bb4c7a1520288d7f7ea
+  subpackages:
+  - unix
243 hack/scripts-dev/Makefile Normal file

@@ -0,0 +1,243 @@
# run from repository root
#
# Example:
# make clean -f ./hack/scripts-dev/Makefile
# make build -f ./hack/scripts-dev/Makefile

.PHONY: build
build:
	GO_BUILD_FLAGS="-v" ./build
	./bin/etcd --version
	ETCDCTL_API=3 ./bin/etcdctl version

clean:
	rm -f ./codecov
	rm -rf ./covdir
	rm -f ./*.log
	rm -f ./bin/Dockerfile-release
	rm -rf ./bin/*.etcd
	rm -rf ./gopath
	rm -rf ./release
	rm -f ./integration/127.0.0.1:* ./integration/localhost:*
	rm -f ./clientv3/integration/127.0.0.1:* ./clientv3/integration/localhost:*
	rm -f ./clientv3/ordering/127.0.0.1:* ./clientv3/ordering/localhost:*

_GO_VERSION = 1.9.2
ifdef GO_VERSION
	_GO_VERSION = $(GO_VERSION)
endif
# Example:
# GO_VERSION=1.8.5 make build-docker-test -f ./hack/scripts-dev/Makefile
# make build-docker-test -f ./hack/scripts-dev/Makefile
# gcloud docker -- login -u _json_key -p "$(cat /etc/gcp-key-etcd.json)" https://gcr.io
# GO_VERSION=1.8.5 make push-docker-test -f ./hack/scripts-dev/Makefile
# make push-docker-test -f ./hack/scripts-dev/Makefile
# gsutil -m acl ch -u allUsers:R -r gs://artifacts.etcd-development.appspot.com
# GO_VERSION=1.8.5 make pull-docker-test -f ./hack/scripts-dev/Makefile
# make pull-docker-test -f ./hack/scripts-dev/Makefile

build-docker-test:
	$(info GO_VERSION: $(_GO_VERSION))
	@cat ./Dockerfile-test | sed s/REPLACE_ME_GO_VERSION/$(_GO_VERSION)/ \
	  > ./.Dockerfile-test
	docker build \
	  --tag gcr.io/etcd-development/etcd-test:go$(_GO_VERSION) \
	  --file ./.Dockerfile-test .

push-docker-test:
	$(info GO_VERSION: $(_GO_VERSION))
	gcloud docker -- push gcr.io/etcd-development/etcd-test:go$(_GO_VERSION)

pull-docker-test:
	$(info GO_VERSION: $(_GO_VERSION))
	docker pull gcr.io/etcd-development/etcd-test:go$(_GO_VERSION)

compile-with-docker-test:
	$(info GO_VERSION: $(_GO_VERSION))
	docker run \
	  --rm \
	  --volume=`pwd`/:/etcd \
	  gcr.io/etcd-development/etcd-test:go$(_GO_VERSION) \
	  /bin/bash -c "cd /etcd && GO_BUILD_FLAGS=-v ./build && ./bin/etcd --version"
# Local machine:
# TEST_OPTS="PASSES='fmt'" make test -f ./hack/scripts-dev/Makefile
# TEST_OPTS="PASSES='fmt bom dep compile build unit'" make test -f ./hack/scripts-dev/Makefile
# TEST_OPTS="RELEASE_TEST=y INTEGRATION=y PASSES='build unit release integration_e2e functional'" make test -f ./hack/scripts-dev/Makefile
# TEST_OPTS="PASSES='build grpcproxy'" make test -f ./hack/scripts-dev/Makefile
#
# Example (test with docker):
# make pull-docker-test -f ./hack/scripts-dev/Makefile
# TEST_OPTS="PASSES='fmt'" make docker-test -f ./hack/scripts-dev/Makefile
# TEST_OPTS="VERBOSE=2 PASSES='unit'" make docker-test -f ./hack/scripts-dev/Makefile
#
# Travis CI (test with docker):
# TEST_OPTS="PASSES='fmt bom dep compile build unit'" make docker-test -f ./hack/scripts-dev/Makefile
#
# Semaphore CI (test with docker):
# TEST_OPTS="RELEASE_TEST=y INTEGRATION=y PASSES='build unit release integration_e2e functional'" make docker-test -f ./hack/scripts-dev/Makefile
# TEST_OPTS="GOARCH=386 PASSES='build unit integration_e2e'" make docker-test -f ./hack/scripts-dev/Makefile
#
# grpc-proxy tests (test with docker):
# TEST_OPTS="PASSES='build grpcproxy'" make docker-test -f ./hack/scripts-dev/Makefile

TEST_SUFFIX = $(shell date +%s | base64 | head -c 15)

_TEST_OPTS = "PASSES='unit'"
ifdef TEST_OPTS
	_TEST_OPTS = $(TEST_OPTS)
endif

.PHONY: test
test:
	$(info TEST_OPTS: $(_TEST_OPTS))
	$(info log-file: test-$(TEST_SUFFIX).log)
	$(_TEST_OPTS) ./test 2>&1 | tee test-$(TEST_SUFFIX).log
	! egrep "(--- FAIL:|leak)" -A10 -B50 test-$(TEST_SUFFIX).log

docker-test:
	$(info GO_VERSION: $(_GO_VERSION))
	$(info TEST_OPTS: $(_TEST_OPTS))
	$(info log-file: test-$(TEST_SUFFIX).log)
	docker run \
	  --rm \
	  --volume=`pwd`:/go/src/github.com/coreos/etcd \
	  gcr.io/etcd-development/etcd-test:go$(_GO_VERSION) \
	  /bin/bash -c "$(_TEST_OPTS) ./test 2>&1 | tee test-$(TEST_SUFFIX).log"
	! egrep "(--- FAIL:|leak)" -A10 -B50 test-$(TEST_SUFFIX).log

docker-test-coverage:
	$(info GO_VERSION: $(_GO_VERSION))
	$(info log-file: docker-test-coverage-$(TEST_SUFFIX).log)
	docker run \
	  --rm \
	  --volume=`pwd`:/go/src/github.com/coreos/etcd \
	  gcr.io/etcd-development/etcd-test:go$(_GO_VERSION) \
	  /bin/bash -c "COVERDIR=covdir PASSES='build build_cov cov' ./test 2>&1 | tee docker-test-coverage-$(TEST_SUFFIX).log && /codecov -t 6040de41-c073-4d6f-bbf8-d89256ef31e1"
	! egrep "(--- FAIL:|leak)" -A10 -B50 docker-test-coverage-$(TEST_SUFFIX).log
# build release container image with Linux
_ETCD_VERSION ?= $(shell git rev-parse --short HEAD || echo "GitNotFound")
ifdef ETCD_VERSION
	_ETCD_VERSION = $(ETCD_VERSION)
endif

# Example:
# ETCD_VERSION=v3.3.0-test.0 make build-docker-release-master -f ./hack/scripts-dev/Makefile
# ETCD_VERSION=v3.3.0-test.0 make push-docker-release-master -f ./hack/scripts-dev/Makefile
# gsutil -m acl ch -u allUsers:R -r gs://artifacts.etcd-development.appspot.com

build-docker-release-master: compile-with-docker-test
	$(info ETCD_VERSION: $(_ETCD_VERSION))
	cp ./Dockerfile-release ./bin/Dockerfile-release
	docker build \
	  --tag gcr.io/etcd-development/etcd:$(_ETCD_VERSION) \
	  --file ./bin/Dockerfile-release \
	  ./bin
	rm -f ./bin/Dockerfile-release

	docker run \
	  --rm \
	  gcr.io/etcd-development/etcd:$(_ETCD_VERSION) \
	  /bin/sh -c "/usr/local/bin/etcd --version && ETCDCTL_API=3 /usr/local/bin/etcdctl version"

push-docker-release-master:
	$(info ETCD_VERSION: $(_ETCD_VERSION))
	gcloud docker -- push gcr.io/etcd-development/etcd:$(_ETCD_VERSION)

# Example:
# make build-docker-test -f ./hack/scripts-dev/Makefile
# make compile-with-docker-test -f ./hack/scripts-dev/Makefile
# make build-docker-dns-test -f ./hack/scripts-dev/Makefile
# gcloud docker -- login -u _json_key -p "$(cat /etc/gcp-key-etcd.json)" https://gcr.io
# make push-docker-dns-test -f ./hack/scripts-dev/Makefile
# gsutil -m acl ch -u allUsers:R -r gs://artifacts.etcd-development.appspot.com
# make pull-docker-dns-test -f ./hack/scripts-dev/Makefile
# make docker-dns-test-run -f ./hack/scripts-dev/Makefile
# build base container image for DNS testing
build-docker-dns-test:
	$(info GO_VERSION: $(_GO_VERSION))
	@cat ./hack/scripts-dev/docker-dns/Dockerfile | sed s/REPLACE_ME_GO_VERSION/$(_GO_VERSION)/ \
	  > ./hack/scripts-dev/docker-dns/.Dockerfile

	docker build \
	  --tag gcr.io/etcd-development/etcd-dns-test:go$(_GO_VERSION) \
	  --file ./hack/scripts-dev/docker-dns/.Dockerfile \
	  ./hack/scripts-dev/docker-dns

	docker run \
	  --rm \
	  --dns 127.0.0.1 \
	  gcr.io/etcd-development/etcd-dns-test:go$(_GO_VERSION) \
	  /bin/bash -c "/etc/init.d/bind9 start && cat /dev/null >/etc/hosts && dig etcd.local"

push-docker-dns-test:
	$(info GO_VERSION: $(_GO_VERSION))
	gcloud docker -- push gcr.io/etcd-development/etcd-dns-test:go$(_GO_VERSION)

pull-docker-dns-test:
	$(info GO_VERSION: $(_GO_VERSION))
	docker pull gcr.io/etcd-development/etcd-dns-test:go$(_GO_VERSION)

# run DNS tests inside container
docker-dns-test-run:
	$(info GO_VERSION: $(_GO_VERSION))
	docker run \
	  --rm \
	  --tty \
	  --dns 127.0.0.1 \
	  --volume=`pwd`/bin:/etcd \
	  --volume=`pwd`/integration/fixtures:/certs \
	  gcr.io/etcd-development/etcd-dns-test:go$(_GO_VERSION) \
	  /bin/bash -c "cd /etcd && /run.sh && rm -rf m*.etcd"
# Example:
# make build-docker-test -f ./hack/scripts-dev/Makefile
# make compile-with-docker-test -f ./hack/scripts-dev/Makefile
# make build-docker-dns-srv-test -f ./hack/scripts-dev/Makefile
# gcloud docker -- login -u _json_key -p "$(cat /etc/gcp-key-etcd.json)" https://gcr.io
# make push-docker-dns-srv-test -f ./hack/scripts-dev/Makefile
# gsutil -m acl ch -u allUsers:R -r gs://artifacts.etcd-development.appspot.com
# make pull-docker-dns-srv-test -f ./hack/scripts-dev/Makefile
# make docker-dns-srv-test-run -f ./hack/scripts-dev/Makefile

# build base container image for DNS/SRV testing
build-docker-dns-srv-test:
	$(info GO_VERSION: $(_GO_VERSION))
	@cat ./hack/scripts-dev/docker-dns-srv/Dockerfile | sed s/REPLACE_ME_GO_VERSION/$(_GO_VERSION)/ \
	  > ./hack/scripts-dev/docker-dns-srv/.Dockerfile

	docker build \
	  --tag gcr.io/etcd-development/etcd-dns-srv-test:go$(_GO_VERSION) \
	  --file ./hack/scripts-dev/docker-dns-srv/.Dockerfile \
	  ./hack/scripts-dev/docker-dns-srv

	docker run \
	  --rm \
	  --dns 127.0.0.1 \
	  gcr.io/etcd-development/etcd-dns-srv-test:go$(_GO_VERSION) \
	  /bin/bash -c "/etc/init.d/bind9 start && cat /dev/null >/etc/hosts && dig +noall +answer SRV _etcd-client-ssl._tcp.etcd.local && dig +noall +answer SRV _etcd-server-ssl._tcp.etcd.local && dig +noall +answer m1.etcd.local m2.etcd.local m3.etcd.local"

push-docker-dns-srv-test:
	$(info GO_VERSION: $(_GO_VERSION))
	gcloud docker -- push gcr.io/etcd-development/etcd-dns-srv-test:go$(_GO_VERSION)

pull-docker-dns-srv-test:
	$(info GO_VERSION: $(_GO_VERSION))
	docker pull gcr.io/etcd-development/etcd-dns-srv-test:go$(_GO_VERSION)

# run DNS/SRV tests inside container
docker-dns-srv-test-run:
	$(info GO_VERSION: $(_GO_VERSION))
	docker run \
	  --rm \
	  --tty \
	  --dns 127.0.0.1 \
	  --volume=`pwd`/bin:/etcd \
	  --volume=`pwd`/integration/fixtures:/certs \
	  gcr.io/etcd-development/etcd-dns-srv-test:go$(_GO_VERSION) \
	  /bin/bash -c "cd /etcd && /run.sh && rm -rf m*.etcd"

# TODO: add DNS integration tests
2 hack/scripts-dev/README Normal file

@@ -0,0 +1,2 @@

scripts for etcd development
48 hack/scripts-dev/docker-dns-srv/.Dockerfile Normal file

@@ -0,0 +1,48 @@
FROM ubuntu:16.10

RUN rm /bin/sh && ln -s /bin/bash /bin/sh
RUN echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections

RUN apt-get -y update \
  && apt-get -y install \
  build-essential \
  gcc \
  apt-utils \
  pkg-config \
  software-properties-common \
  apt-transport-https \
  libssl-dev \
  sudo \
  bash \
  curl \
  tar \
  git \
  netcat \
  bind9 \
  dnsutils \
  && apt-get -y update \
  && apt-get -y upgrade \
  && apt-get -y autoremove \
  && apt-get -y autoclean

ENV GOROOT /usr/local/go
ENV GOPATH /go
ENV PATH ${GOPATH}/bin:${GOROOT}/bin:${PATH}
ENV GO_VERSION 1.9.2
ENV GO_DOWNLOAD_URL https://storage.googleapis.com/golang
RUN rm -rf ${GOROOT} \
  && curl -s ${GO_DOWNLOAD_URL}/go${GO_VERSION}.linux-amd64.tar.gz | tar -v -C /usr/local/ -xz \
  && mkdir -p ${GOPATH}/src ${GOPATH}/bin \
  && go version

RUN mkdir -p /var/bind /etc/bind
RUN chown root:bind /var/bind /etc/bind
ADD Procfile /Procfile
ADD run.sh /run.sh

ADD named.conf etcd.zone rdns.zone /etc/bind/
RUN chown root:bind /etc/bind/named.conf /etc/bind/etcd.zone /etc/bind/rdns.zone
ADD resolv.conf /etc/resolv.conf

RUN go get github.com/mattn/goreman
CMD ["/run.sh"]
48 hack/scripts-dev/docker-dns-srv/Dockerfile Normal file

@@ -0,0 +1,48 @@
FROM ubuntu:16.10

RUN rm /bin/sh && ln -s /bin/bash /bin/sh
RUN echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections

RUN apt-get -y update \
  && apt-get -y install \
  build-essential \
  gcc \
  apt-utils \
  pkg-config \
  software-properties-common \
  apt-transport-https \
  libssl-dev \
  sudo \
  bash \
  curl \
  tar \
  git \
  netcat \
  bind9 \
  dnsutils \
  && apt-get -y update \
  && apt-get -y upgrade \
  && apt-get -y autoremove \
  && apt-get -y autoclean

ENV GOROOT /usr/local/go
ENV GOPATH /go
ENV PATH ${GOPATH}/bin:${GOROOT}/bin:${PATH}
ENV GO_VERSION REPLACE_ME_GO_VERSION
ENV GO_DOWNLOAD_URL https://storage.googleapis.com/golang
RUN rm -rf ${GOROOT} \
  && curl -s ${GO_DOWNLOAD_URL}/go${GO_VERSION}.linux-amd64.tar.gz | tar -v -C /usr/local/ -xz \
  && mkdir -p ${GOPATH}/src ${GOPATH}/bin \
  && go version

RUN mkdir -p /var/bind /etc/bind
RUN chown root:bind /var/bind /etc/bind
ADD Procfile /Procfile
ADD run.sh /run.sh

ADD named.conf etcd.zone rdns.zone /etc/bind/
RUN chown root:bind /etc/bind/named.conf /etc/bind/etcd.zone /etc/bind/rdns.zone
ADD resolv.conf /etc/resolv.conf

RUN go get github.com/mattn/goreman
CMD ["/run.sh"]
5 hack/scripts-dev/docker-dns-srv/Procfile Normal file

@@ -0,0 +1,5 @@
etcd1: ./etcd --name m1 --listen-client-urls https://127.0.0.1:2379 --advertise-client-urls https://m1.etcd.local:2379 --listen-peer-urls https://127.0.0.1:2380 --initial-advertise-peer-urls=https://m1.etcd.local:2380 --initial-cluster-token tkn --discovery-srv=etcd.local --initial-cluster-state new --peer-cert-file=/certs/server-wildcard.crt --peer-key-file=/certs/server-wildcard.key.insecure --peer-trusted-ca-file=/certs/ca.crt --peer-client-cert-auth --cert-file=/certs/server-wildcard.crt --key-file=/certs/server-wildcard.key.insecure --trusted-ca-file=/certs/ca.crt --client-cert-auth

etcd2: ./etcd --name m2 --listen-client-urls https://127.0.0.1:22379 --advertise-client-urls https://m2.etcd.local:22379 --listen-peer-urls https://127.0.0.1:22380 --initial-advertise-peer-urls=https://m2.etcd.local:22380 --initial-cluster-token tkn --discovery-srv=etcd.local --initial-cluster-state new --peer-cert-file=/certs/server-wildcard.crt --peer-key-file=/certs/server-wildcard.key.insecure --peer-trusted-ca-file=/certs/ca.crt --peer-client-cert-auth --cert-file=/certs/server-wildcard.crt --key-file=/certs/server-wildcard.key.insecure --trusted-ca-file=/certs/ca.crt --client-cert-auth

etcd3: ./etcd --name m3 --listen-client-urls https://127.0.0.1:32379 --advertise-client-urls https://m3.etcd.local:32379 --listen-peer-urls https://127.0.0.1:32380 --initial-advertise-peer-urls=https://m3.etcd.local:32380 --initial-cluster-token tkn --discovery-srv=etcd.local --initial-cluster-state new --peer-cert-file=/certs/server-wildcard.crt --peer-key-file=/certs/server-wildcard.key.insecure --peer-trusted-ca-file=/certs/ca.crt --peer-client-cert-auth --cert-file=/certs/server-wildcard.crt --key-file=/certs/server-wildcard.key.insecure --trusted-ca-file=/certs/ca.crt --client-cert-auth
16 hack/scripts-dev/docker-dns-srv/etcd.zone Normal file

@@ -0,0 +1,16 @@
etcd.local.  IN  SOA  bindhostname. admin.etcd.local. (
   1452607488
   10800
   3600
   604800
   38400 )
etcd.local.  IN  NS  bindhostname.
m1.etcd.local.  300  IN  A  127.0.0.1
m2.etcd.local.  300  IN  A  127.0.0.1
m3.etcd.local.  300  IN  A  127.0.0.1
_etcd-client-ssl._tcp  300  IN  SRV  0 0 2379 m1.etcd.local.
_etcd-client-ssl._tcp  300  IN  SRV  0 0 22379 m2.etcd.local.
_etcd-client-ssl._tcp  300  IN  SRV  0 0 32379 m3.etcd.local.
_etcd-server-ssl._tcp  300  IN  SRV  0 0 2380 m1.etcd.local.
_etcd-server-ssl._tcp  300  IN  SRV  0 0 22380 m2.etcd.local.
_etcd-server-ssl._tcp  300  IN  SRV  0 0 32380 m3.etcd.local.
Some files were not shown because too many files have changed in this diff.