Compare commits
28 Commits
SHA1
---
c65a9e2dd1
7862f6ed2c
257319fb18
cdb2dc11b8
770674e4a6
c10168f718
94673a6ba4
a1bf5574fc
6d646c442a
1226686cf3
50e12328ac
0dc78a144b
7cf32c262c
4a9247a47e
ac63c2fbd0
ae5bd3c268
94e46ba0d7
8c10973820
1af0b51537
f4669c3b62
55c3476abc
b66203c0a1
4388404f56
a447d51f23
4f3c81d81d
478da3bf24
d6b30e43cd
1e98c9642e
@@ -105,7 +105,7 @@ func (t *tokenJWT) assign(ctx context.Context, username string, revision uint64)
    token, err := tk.SignedString(t.key)
    if err != nil {
        if t.lg != nil {
-           t.lg.Warn(
+           t.lg.Debug(
                "failed to sign a JWT token",
                zap.String("user-name", username),
                zap.Uint64("revision", revision),
@@ -118,7 +118,7 @@ func (t *tokenJWT) assign(ctx context.Context, username string, revision uint64)
    }

    if t.lg != nil {
-       t.lg.Info(
+       t.lg.Debug(
            "created/assigned a new JWT token",
            zap.String("user-name", username),
            zap.Uint64("revision", revision),
@@ -136,7 +136,7 @@ func newTokenProviderJWT(lg *zap.Logger, optMap map[string]string) (*tokenJWT, e
    err = opts.ParseWithDefaults(optMap)
    if err != nil {
        if lg != nil {
-           lg.Warn("problem loading JWT options", zap.Error(err))
+           lg.Error("problem loading JWT options", zap.Error(err))
        } else {
            plog.Errorf("problem loading JWT options: %s", err)
        }
@@ -306,7 +306,7 @@ func (as *authStore) Authenticate(ctx context.Context, username, password string
        return nil, ErrAuthFailed
    }

-   if user.Options.NoPassword {
+   if user.Options != nil && user.Options.NoPassword {
        return nil, ErrAuthFailed
    }

@@ -344,7 +344,7 @@ func (as *authStore) CheckPassword(username, password string) (uint64, error) {
        return 0, ErrAuthFailed
    }

-   if user.Options.NoPassword {
+   if user.Options != nil && user.Options.NoPassword {
        return 0, ErrAuthFailed
    }

@@ -388,7 +388,8 @@ func (as *authStore) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse,
    var hashed []byte
    var err error

-   if r.Options != nil && !r.Options.NoPassword {
+   noPassword := r.Options != nil && r.Options.NoPassword
+   if !noPassword {
        hashed, err = bcrypt.GenerateFromPassword([]byte(r.Password), as.bcryptCost)
        if err != nil {
            if as.lg != nil {
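The nil guards added in the auth store hunks above matter because a user record's Options field can legitimately be nil (the e2e test further down adds a "nooption" user with a nil options value), and dereferencing it directly would panic. A minimal, self-contained sketch of the guarded pattern, using hypothetical stand-in types rather than the generated protobuf structs:

```go
package main

import "fmt"

// Hypothetical stand-ins for the protobuf-generated types; only the shape matters here.
type UserAddOptions struct{ NoPassword bool }

type User struct {
	Name    string
	Options *UserAddOptions
}

// isNoPasswordUser mirrors the guarded check introduced in the diff:
// a nil Options field must not be dereferenced.
func isNoPasswordUser(u *User) bool {
	return u.Options != nil && u.Options.NoPassword
}

func main() {
	legacy := &User{Name: "nooption"} // user added with no options at all
	modern := &User{Name: "svc", Options: &UserAddOptions{NoPassword: true}}
	fmt.Println(isNoPasswordUser(legacy), isNoPasswordUser(modern)) // false true
}
```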
@@ -113,10 +113,9 @@ func (c *Client) streamClientInterceptor(logger *zap.Logger, optFuncs ...retryOp
        return nil, status.Errorf(codes.Unimplemented, "clientv3/retry_interceptor: cannot retry on ClientStreams, set Disable()")
    }
    newStreamer, err := streamer(ctx, desc, cc, method, grpcOpts...)
-   logger.Warn("retry stream intercept", zap.Error(err))
    if err != nil {
-       // TODO(mwitkow): Maybe dial and transport errors should be retriable?
-       return nil, err
+       logger.Error("streamer failed to create ClientStream", zap.Error(err))
+       return nil, err // TODO(mwitkow): Maybe dial and transport errors should be retriable?
    }
    retryingStreamer := &serverStreamingRetryingStream{
        client: c,
@@ -185,6 +184,7 @@ func (s *serverStreamingRetryingStream) RecvMsg(m interface{}) error {
    if !attemptRetry {
        return lastErr // success or hard failure
    }

+   // We start off from attempt 1, because zeroth was already made on normal SendMsg().
    for attempt := uint(1); attempt < s.callOpts.max; attempt++ {
        if err := waitRetryBackoff(s.ctx, attempt, s.callOpts); err != nil {
@@ -192,12 +192,13 @@ func (s *serverStreamingRetryingStream) RecvMsg(m interface{}) error {
        }
        newStream, err := s.reestablishStreamAndResendBuffer(s.ctx)
        if err != nil {
-           // TODO(mwitkow): Maybe dial and transport errors should be retriable?
-           return err
+           s.client.lg.Error("failed reestablishStreamAndResendBuffer", zap.Error(err))
+           return err // TODO(mwitkow): Maybe dial and transport errors should be retriable?
        }
        s.setStream(newStream)

+       s.client.lg.Warn("retrying RecvMsg", zap.Error(lastErr))
        attemptRetry, lastErr = s.receiveMsgAndIndicateRetry(m)
        //fmt.Printf("Received message and indicate: %v %v\n", attemptRetry, lastErr)
        if !attemptRetry {
            return lastErr
        }
@@ -37,11 +37,17 @@ const (
)

// NewPeerHandler generates an http.Handler to handle etcd peer requests.
-func NewPeerHandler(lg *zap.Logger, s etcdserver.ServerPeer) http.Handler {
-   return newPeerHandler(lg, s, s.RaftHandler(), s.LeaseHandler())
+func NewPeerHandler(lg *zap.Logger, s etcdserver.ServerPeerV2) http.Handler {
+   return newPeerHandler(lg, s, s.RaftHandler(), s.LeaseHandler(), s.HashKVHandler())
}

-func newPeerHandler(lg *zap.Logger, s etcdserver.Server, raftHandler http.Handler, leaseHandler http.Handler) http.Handler {
+func newPeerHandler(
+   lg *zap.Logger,
+   s etcdserver.Server,
+   raftHandler http.Handler,
+   leaseHandler http.Handler,
+   hashKVHandler http.Handler,
+) http.Handler {
    peerMembersHandler := newPeerMembersHandler(lg, s.Cluster())
    peerMemberPromoteHandler := newPeerMemberPromoteHandler(lg, s)

@@ -55,6 +61,9 @@ func newPeerHandler(lg *zap.Logger, s etcdserver.Server, raftHandler http.Handle
        mux.Handle(leasehttp.LeasePrefix, leaseHandler)
        mux.Handle(leasehttp.LeaseInternalPrefix, leaseHandler)
    }
+   if hashKVHandler != nil {
+       mux.Handle(etcdserver.PeerHashKVPath, hashKVHandler)
+   }
    mux.HandleFunc(versionPath, versionHandler(s.Cluster(), serveVersion))
    return mux
}
@@ -83,7 +83,7 @@ var fakeRaftHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Reque
// TestNewPeerHandlerOnRaftPrefix tests that NewPeerHandler returns a handler that
// handles raft-prefix requests well.
func TestNewPeerHandlerOnRaftPrefix(t *testing.T) {
-   ph := newPeerHandler(zap.NewExample(), &fakeServer{cluster: &fakeCluster{}}, fakeRaftHandler, nil)
+   ph := newPeerHandler(zap.NewExample(), &fakeServer{cluster: &fakeCluster{}}, fakeRaftHandler, nil, nil)
    srv := httptest.NewServer(ph)
    defer srv.Close()

@@ -231,7 +231,7 @@ func TestServeMemberPromoteFails(t *testing.T) {

// TestNewPeerHandlerOnMembersPromotePrefix verifies the request with members promote prefix is routed correctly
func TestNewPeerHandlerOnMembersPromotePrefix(t *testing.T) {
-   ph := newPeerHandler(zap.NewExample(), &fakeServer{cluster: &fakeCluster{}}, fakeRaftHandler, nil)
+   ph := newPeerHandler(zap.NewExample(), &fakeServer{cluster: &fakeCluster{}}, fakeRaftHandler, nil, nil)
    srv := httptest.NewServer(ph)
    defer srv.Close()

@@ -657,8 +657,8 @@ func (c *RaftCluster) IsReadyToRemoveVotingMember(id uint64) bool {
}

func (c *RaftCluster) IsReadyToPromoteMember(id uint64) bool {
-   nmembers := 1
-   nstarted := 0
+   nmembers := 1 // We count the learner to be promoted for the future quorum
+   nstarted := 1 // and we also count it as started.

    for _, member := range c.VotingMembers() {
        if member.IsStarted() {
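A minimal sketch (not the etcd implementation) of the arithmetic behind the change above: the learner being promoted now counts both toward the future voting-member total and as an already started member, so promotion is allowed whenever the started members would still form a quorum of the enlarged cluster. The test table in the next hunk exercises exactly these cases.

```go
package main

import "fmt"

// isReadyToPromote reports whether promoting one learner keeps the cluster healthy,
// given how many voting members exist and how many of them have started.
// Assumption: the final comparison mirrors etcd's quorum rule (n/2 + 1).
func isReadyToPromote(votingTotal, votingStarted int) bool {
	nmembers := votingTotal + 1   // future quorum includes the promoted learner
	nstarted := votingStarted + 1 // and the learner itself is counted as started
	return nstarted >= nmembers/2+1
}

func main() {
	fmt.Println(isReadyToPromote(1, 1)) // 1/1 started, new quorum 2 -> true
	fmt.Println(isReadyToPromote(1, 0)) // 0/1 started -> false
	fmt.Println(isReadyToPromote(3, 1)) // 1/3 started, new quorum 3 -> false
	fmt.Println(isReadyToPromote(3, 2)) // 2/3 started, new quorum 3 -> true
}
```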
@@ -859,3 +859,90 @@ func TestIsReadyToRemoveVotingMember(t *testing.T) {
        }
    }
}
+
+func TestIsReadyToPromoteMember(t *testing.T) {
+   tests := []struct {
+       members   []*Member
+       promoteID uint64
+       want      bool
+   }{
+       {
+           // 1/1 members ready, should succeed (quorum = 1, new quorum = 2)
+           []*Member{
+               newTestMember(1, nil, "1", nil),
+               newTestMemberAsLearner(2, nil, "2", nil),
+           },
+           2,
+           true,
+       },
+       {
+           // 0/1 members ready, should fail (quorum = 1)
+           []*Member{
+               newTestMember(1, nil, "", nil),
+               newTestMemberAsLearner(2, nil, "2", nil),
+           },
+           2,
+           false,
+       },
+       {
+           // 2/2 members ready, should succeed (quorum = 2)
+           []*Member{
+               newTestMember(1, nil, "1", nil),
+               newTestMember(2, nil, "2", nil),
+               newTestMemberAsLearner(3, nil, "3", nil),
+           },
+           3,
+           true,
+       },
+       {
+           // 1/2 members ready, should succeed (quorum = 2)
+           []*Member{
+               newTestMember(1, nil, "1", nil),
+               newTestMember(2, nil, "", nil),
+               newTestMemberAsLearner(3, nil, "3", nil),
+           },
+           3,
+           true,
+       },
+       {
+           // 1/3 members ready, should fail (quorum = 2)
+           []*Member{
+               newTestMember(1, nil, "1", nil),
+               newTestMember(2, nil, "", nil),
+               newTestMember(3, nil, "", nil),
+               newTestMemberAsLearner(4, nil, "4", nil),
+           },
+           4,
+           false,
+       },
+       {
+           // 2/3 members ready, should succeed (quorum = 2, new quorum = 3)
+           []*Member{
+               newTestMember(1, nil, "1", nil),
+               newTestMember(2, nil, "2", nil),
+               newTestMember(3, nil, "", nil),
+               newTestMemberAsLearner(4, nil, "4", nil),
+           },
+           4,
+           true,
+       },
+       {
+           // 2/4 members ready, should succeed (quorum = 3)
+           []*Member{
+               newTestMember(1, nil, "1", nil),
+               newTestMember(2, nil, "2", nil),
+               newTestMember(3, nil, "", nil),
+               newTestMember(4, nil, "", nil),
+               newTestMemberAsLearner(5, nil, "5", nil),
+           },
+           5,
+           true,
+       },
+   }
+   for i, tt := range tests {
+       c := newTestCluster(tt.members)
+       if got := c.IsReadyToPromoteMember(tt.promoteID); got != tt.want {
+           t.Errorf("%d: isReadyToPromoteMember returned %t, want %t", i, got, tt.want)
+       }
+   }
+}
@@ -226,6 +226,9 @@ func (s *Snapshotter) snapNames() ([]string, error) {
    if err != nil {
        return nil, err
    }
+   if err = s.cleanupSnapdir(names); err != nil {
+       return nil, err
+   }
    snaps := checkSuffix(s.lg, names)
    if len(snaps) == 0 {
        return nil, ErrNoSnapshot
@@ -253,3 +256,21 @@ func checkSuffix(lg *zap.Logger, names []string) []string {
    }
    return snaps
}
+
+// cleanupSnapdir removes any files that should not be in the snapshot directory:
+// - db.tmp prefixed files that can be orphaned by defragmentation
+func (s *Snapshotter) cleanupSnapdir(filenames []string) error {
+   for _, filename := range filenames {
+       if strings.HasPrefix(filename, "db.tmp") {
+           if s.lg != nil {
+               s.lg.Info("found orphaned defragmentation file; deleting", zap.String("path", filename))
+           } else {
+               plog.Infof("found orphaned defragmentation file; deleting: %s", filename)
+           }
+           if rmErr := os.Remove(filepath.Join(s.dir, filename)); rmErr != nil && !os.IsNotExist(rmErr) {
+               return fmt.Errorf("failed to remove orphaned defragmentation file %s: %v", filename, rmErr)
+           }
+       }
+   }
+   return nil
+}
@@ -15,11 +15,15 @@
package etcdserver

import (
+   "bytes"
    "context"
+   "encoding/json"
    "fmt"
+   "io/ioutil"
+   "net/http"
+   "strings"
    "time"

    "go.etcd.io/etcd/clientv3"
    "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
    pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
    "go.etcd.io/etcd/mvcc"
@@ -230,10 +234,12 @@ func (s *EtcdServer) checkHashKV() error {
        mismatch(uint64(s.ID()))
    }

+   checkedCount := 0
    for _, p := range peers {
        if p.resp == nil {
            continue
        }
+       checkedCount++
        id := p.resp.Header.MemberId

        // leader expects follower's latest revision less than or equal to leader's
@@ -298,62 +304,56 @@ func (s *EtcdServer) checkHashKV() error {
            mismatch(id)
        }
    }
+   if lg != nil {
+       lg.Info("finished peer corruption check", zap.Int("number-of-peers-checked", checkedCount))
+   } else {
+       plog.Infof("finished peer corruption check")
+   }

    return nil
}

-type peerHashKVResp struct {
+type peerInfo struct {
    id  types.ID
    eps []string
+}

-   resp *clientv3.HashKVResponse
+type peerHashKVResp struct {
+   peerInfo
+   resp *pb.HashKVResponse
    err  error
}

-func (s *EtcdServer) getPeerHashKVs(rev int64) (resps []*peerHashKVResp) {
+func (s *EtcdServer) getPeerHashKVs(rev int64) []*peerHashKVResp {
    // TODO: handle the case when "s.cluster.Members" have not
    // been populated (e.g. no snapshot to load from disk)
-   mbs := s.cluster.Members()
-   pss := make([]peerHashKVResp, len(mbs))
-   for _, m := range mbs {
+   members := s.cluster.Members()
+   peers := make([]peerInfo, 0, len(members))
+   for _, m := range members {
        if m.ID == s.ID() {
            continue
        }
-       pss = append(pss, peerHashKVResp{id: m.ID, eps: m.PeerURLs})
+       peers = append(peers, peerInfo{id: m.ID, eps: m.PeerURLs})
    }

    lg := s.getLogger()

-   for _, p := range pss {
+   var resps []*peerHashKVResp
+   for _, p := range peers {
        if len(p.eps) == 0 {
            continue
        }
-       cli, cerr := clientv3.New(clientv3.Config{
-           DialTimeout: s.Cfg.ReqTimeout(),
-           Endpoints:   p.eps,
-       })
-       if cerr != nil {
-           if lg != nil {
-               lg.Warn(
-                   "failed to create client to peer URL",
-                   zap.String("local-member-id", s.ID().String()),
-                   zap.String("remote-peer-id", p.id.String()),
-                   zap.Strings("remote-peer-endpoints", p.eps),
-                   zap.Error(cerr),
-               )
-           } else {
-               plog.Warningf("%s failed to create client to peer %q for hash checking (%q)", s.ID(), p.eps, cerr.Error())
-           }
-           continue
-       }

        respsLen := len(resps)
-       for _, c := range cli.Endpoints() {
+       var lastErr error
+       for _, ep := range p.eps {
            ctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout())
-           var resp *clientv3.HashKVResponse
-           resp, cerr = cli.HashKV(ctx, c, rev)
+
+           var resp *pb.HashKVResponse
+           resp, lastErr = s.getPeerHashKVHTTP(ctx, ep, rev)
            cancel()
-           if cerr == nil {
-               resps = append(resps, &peerHashKVResp{id: p.id, eps: p.eps, resp: resp, err: nil})
+           if lastErr == nil {
+               resps = append(resps, &peerHashKVResp{peerInfo: p, resp: resp, err: nil})
                break
            }
            if lg != nil {
@@ -361,17 +361,17 @@ func (s *EtcdServer) getPeerHashKVs(rev int64) (resps []*peerHashKVResp) {
                    "failed hash kv request",
                    zap.String("local-member-id", s.ID().String()),
                    zap.Int64("requested-revision", rev),
-                   zap.String("remote-peer-endpoint", c),
-                   zap.Error(cerr),
+                   zap.String("remote-peer-endpoint", ep),
+                   zap.Error(lastErr),
                )
            } else {
-               plog.Warningf("%s hash-kv error %q on peer %q with revision %d", s.ID(), cerr.Error(), c, rev)
+               plog.Warningf("%s hash-kv error %q on peer %q with revision %d", s.ID(), lastErr.Error(), ep, rev)
            }
        }
-       cli.Close()

        // failed to get hashKV from all endpoints of this peer
        if respsLen == len(resps) {
-           resps = append(resps, &peerHashKVResp{id: p.id, eps: p.eps, resp: nil, err: cerr})
+           resps = append(resps, &peerHashKVResp{peerInfo: p, resp: nil, err: lastErr})
        }
    }
    return resps
@@ -410,3 +410,112 @@ func (a *applierV3Corrupt) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantR
func (a *applierV3Corrupt) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
    return nil, ErrCorrupt
}
+
+type ServerPeerV2 interface {
+   ServerPeer
+   HashKVHandler() http.Handler
+}
+
+const PeerHashKVPath = "/members/hashkv"
+
+type hashKVHandler struct {
+   lg     *zap.Logger
+   server *EtcdServer
+}
+
+func (s *EtcdServer) HashKVHandler() http.Handler {
+   return &hashKVHandler{lg: s.getLogger(), server: s}
+}
+
+func (h *hashKVHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+   if r.Method != http.MethodGet {
+       w.Header().Set("Allow", http.MethodGet)
+       http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
+       return
+   }
+   if r.URL.Path != PeerHashKVPath {
+       http.Error(w, "bad path", http.StatusBadRequest)
+       return
+   }
+
+   defer r.Body.Close()
+   b, err := ioutil.ReadAll(r.Body)
+   if err != nil {
+       http.Error(w, "error reading body", http.StatusBadRequest)
+       return
+   }
+
+   req := &pb.HashKVRequest{}
+   if err = json.Unmarshal(b, req); err != nil {
+       h.lg.Warn("failed to unmarshal request", zap.Error(err))
+       http.Error(w, "error unmarshalling request", http.StatusBadRequest)
+       return
+   }
+   hash, rev, compactRev, err := h.server.KV().HashByRev(req.Revision)
+   if err != nil {
+       h.lg.Warn(
+           "failed to get hashKV",
+           zap.Int64("requested-revision", req.Revision),
+           zap.Error(err),
+       )
+       http.Error(w, err.Error(), http.StatusBadRequest)
+       return
+   }
+   resp := &pb.HashKVResponse{Header: &pb.ResponseHeader{Revision: rev}, Hash: hash, CompactRevision: compactRev}
+   respBytes, err := json.Marshal(resp)
+   if err != nil {
+       h.lg.Warn("failed to marshal hashKV response", zap.Error(err))
+       http.Error(w, err.Error(), http.StatusInternalServerError)
+       return
+   }
+
+   w.Header().Set("X-Etcd-Cluster-ID", h.server.Cluster().ID().String())
+   w.Header().Set("Content-Type", "application/json")
+   w.Write(respBytes)
+}
+
+// getPeerHashKVHTTP fetch hash of kv store at the given rev via http call to the given url
+func (s *EtcdServer) getPeerHashKVHTTP(ctx context.Context, url string, rev int64) (*pb.HashKVResponse, error) {
+   cc := &http.Client{Transport: s.peerRt}
+   hashReq := &pb.HashKVRequest{Revision: rev}
+   hashReqBytes, err := json.Marshal(hashReq)
+   if err != nil {
+       return nil, err
+   }
+   requestUrl := url + PeerHashKVPath
+   req, err := http.NewRequest(http.MethodGet, requestUrl, bytes.NewReader(hashReqBytes))
+   if err != nil {
+       return nil, err
+   }
+   req = req.WithContext(ctx)
+   req.Header.Set("Content-Type", "application/json")
+   req.Cancel = ctx.Done()
+
+   resp, err := cc.Do(req)
+   if err != nil {
+       return nil, err
+   }
+   defer resp.Body.Close()
+   b, err := ioutil.ReadAll(resp.Body)
+   if err != nil {
+       return nil, err
+   }
+
+   if resp.StatusCode == http.StatusBadRequest {
+       if strings.Contains(string(b), mvcc.ErrCompacted.Error()) {
+           return nil, rpctypes.ErrCompacted
+       }
+       if strings.Contains(string(b), mvcc.ErrFutureRev.Error()) {
+           return nil, rpctypes.ErrFutureRev
+       }
+   }
+   if resp.StatusCode != http.StatusOK {
+       return nil, fmt.Errorf("unknown error: %s", string(b))
+   }
+
+   hashResp := &pb.HashKVResponse{}
+   if err := json.Unmarshal(b, hashResp); err != nil {
+       return nil, err
+   }
+   return hashResp, nil
+}
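For context, the corruption check now asks each peer for its KV hash over plain HTTP on the peer listener instead of opening a clientv3 connection. The sketch below is a condensed, standalone illustration of that round trip against the /members/hashkv endpoint added above; it is not the etcd code (which routes through the peer TLS transport), the peer URL is a placeholder, and the JSON field names are assumptions about how the generated protobuf structs marshal.

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
)

// hashKVRequest/hashKVResponse approximate the JSON shape that json.Marshal
// produces for pb.HashKVRequest / pb.HashKVResponse; field names are illustrative.
type hashKVRequest struct {
	Revision int64 `json:"revision,omitempty"`
}

type hashKVResponse struct {
	Hash            uint32 `json:"hash,omitempty"`
	CompactRevision int64  `json:"compact_revision,omitempty"`
}

// fetchPeerHash asks a peer for its KV hash at rev via GET <peer-url>/members/hashkv.
func fetchPeerHash(peerURL string, rev int64) (*hashKVResponse, error) {
	body, err := json.Marshal(&hashKVRequest{Revision: rev})
	if err != nil {
		return nil, err
	}
	req, err := http.NewRequest(http.MethodGet, peerURL+"/members/hashkv", bytes.NewReader(body))
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/json")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	b, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("peer returned %d: %s", resp.StatusCode, string(b))
	}
	out := &hashKVResponse{}
	if err := json.Unmarshal(b, out); err != nil {
		return nil, err
	}
	return out, nil
}

func main() {
	// Placeholder peer URL; a real caller would iterate over the member's peer URLs.
	if h, err := fetchPeerHash("http://127.0.0.1:2380", 0); err == nil {
		fmt.Printf("peer hash=%d compact_revision=%d\n", h.Hash, h.CompactRevision)
	}
}
```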
@@ -807,12 +807,13 @@ func (s *EtcdServer) start() {

func (s *EtcdServer) purgeFile() {
    var dberrc, serrc, werrc <-chan error
+   var dbdonec, sdonec, wdonec <-chan struct{}
    if s.Cfg.MaxSnapFiles > 0 {
-       dberrc = fileutil.PurgeFile(s.getLogger(), s.Cfg.SnapDir(), "snap.db", s.Cfg.MaxSnapFiles, purgeFileInterval, s.done)
-       serrc = fileutil.PurgeFile(s.getLogger(), s.Cfg.SnapDir(), "snap", s.Cfg.MaxSnapFiles, purgeFileInterval, s.done)
+       dbdonec, dberrc = fileutil.PurgeFileWithDoneNotify(s.getLogger(), s.Cfg.SnapDir(), "snap.db", s.Cfg.MaxSnapFiles, purgeFileInterval, s.stopping)
+       sdonec, serrc = fileutil.PurgeFileWithDoneNotify(s.getLogger(), s.Cfg.SnapDir(), "snap", s.Cfg.MaxSnapFiles, purgeFileInterval, s.stopping)
    }
    if s.Cfg.MaxWALFiles > 0 {
-       werrc = fileutil.PurgeFile(s.getLogger(), s.Cfg.WALDir(), "wal", s.Cfg.MaxWALFiles, purgeFileInterval, s.done)
+       wdonec, werrc = fileutil.PurgeFileWithDoneNotify(s.getLogger(), s.Cfg.WALDir(), "wal", s.Cfg.MaxWALFiles, purgeFileInterval, s.stopping)
    }

    lg := s.getLogger()
@@ -836,6 +837,15 @@ func (s *EtcdServer) purgeFile() {
            plog.Fatalf("failed to purge wal file %v", e)
        }
+   case <-s.stopping:
+       if dbdonec != nil {
+           <-dbdonec
+       }
+       if sdonec != nil {
+           <-sdonec
+       }
+       if wdonec != nil {
+           <-wdonec
+       }
+       return
    }
}
@@ -577,12 +577,7 @@ func (s *EtcdServer) raftRequestOnce(ctx context.Context, r pb.InternalRaftReque
}

func (s *EtcdServer) raftRequest(ctx context.Context, r pb.InternalRaftRequest) (proto.Message, error) {
-   for {
-       resp, err := s.raftRequestOnce(ctx, r)
-       if err != auth.ErrAuthOldRevision {
-           return resp, err
-       }
-   }
+   return s.raftRequestOnce(ctx, r)
}

// doSerialize handles the auth logic, with permissions checked by "chk", for a serialized request "get". Returns a non-nil error on authentication failure.
@@ -357,6 +357,7 @@ func TestV3AuthNonAuthorizedRPCs(t *testing.T) {
}

func TestV3AuthOldRevConcurrent(t *testing.T) {
+   t.Skip() // TODO(jingyih): re-enable the test when #10408 is fixed.
    defer testutil.AfterTest(t)
    clus := NewClusterV3(t, &ClusterConfig{Size: 1})
    defer clus.Terminate(t)
@@ -369,13 +369,27 @@ func (b *backend) defrag() error {

    b.batchTx.tx = nil

-   tmpdb, err := bolt.Open(b.db.Path()+".tmp", 0600, boltOpenOptions)
+   // Create a temporary file to ensure we start with a clean slate.
+   // Snapshotter.cleanupSnapdir cleans up any of these that are found during startup.
+   dir := filepath.Dir(b.db.Path())
+   temp, err := ioutil.TempFile(dir, "db.tmp.*")
+   if err != nil {
+       return err
+   }
+   options := bolt.Options{}
+   if boltOpenOptions != nil {
+       options = *boltOpenOptions
+   }
+   options.OpenFile = func(path string, i int, mode os.FileMode) (file *os.File, err error) {
+       return temp, nil
+   }
+   tdbp := temp.Name()
+   tmpdb, err := bolt.Open(tdbp, 0600, &options)
    if err != nil {
        return err
    }

    dbp := b.db.Path()
-   tdbp := tmpdb.Path()
    size1, sizeInUse1 := b.Size(), b.SizeInUse()
    if b.lg != nil {
        b.lg.Info(
@@ -387,11 +401,17 @@ func (b *backend) defrag() error {
            zap.String("current-db-size-in-use", humanize.Bytes(uint64(sizeInUse1))),
        )
    }

+   // gofail: var defragBeforeCopy struct{}
    err = defragdb(b.db, tmpdb, defragLimit)
    if err != nil {
        tmpdb.Close()
-       os.RemoveAll(tmpdb.Path())
+       if rmErr := os.RemoveAll(tmpdb.Path()); rmErr != nil {
+           if b.lg != nil {
+               b.lg.Error("failed to remove db.tmp after defragmentation completed", zap.Error(rmErr))
+           } else {
+               plog.Fatalf("failed to remove db.tmp after defragmentation completed: %v", rmErr)
+           }
+       }
        return err
    }

@@ -411,6 +431,7 @@ func (b *backend) defrag() error {
            plog.Fatalf("cannot close database (%s)", err)
        }
    }
+   // gofail: var defragBeforeRename struct{}
    err = os.Rename(tdbp, dbp)
    if err != nil {
        if b.lg != nil {
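The defrag change above swaps the fixed "<db>.tmp" path for a uniquely named temp file, and it makes bolt reuse that pre-created file through Options.OpenFile so the name still begins with the "db.tmp" prefix that Snapshotter.cleanupSnapdir deletes on startup. A minimal sketch of that pattern, assuming the go.etcd.io/bbolt package the diff aliases as bolt:

```go
package sketch

import (
	"io/ioutil"
	"os"
	"path/filepath"

	bolt "go.etcd.io/bbolt"
)

// openTempDB creates a uniquely named "db.tmp.*" file next to dbPath and opens
// it as a bolt database, handing bolt the already-open file so the randomized
// name stays under the caller's control (and under the cleanup prefix).
func openTempDB(dbPath string) (*bolt.DB, string, error) {
	dir := filepath.Dir(dbPath)
	temp, err := ioutil.TempFile(dir, "db.tmp.*") // e.g. db.tmp.432101234
	if err != nil {
		return nil, "", err
	}
	opts := bolt.Options{
		// bolt would normally open the path itself; returning the pre-created
		// handle makes it use our temp file instead.
		OpenFile: func(path string, flag int, mode os.FileMode) (*os.File, error) {
			return temp, nil
		},
	}
	db, err := bolt.Open(temp.Name(), 0600, &opts)
	if err != nil {
		os.Remove(temp.Name()) // best-effort cleanup, mirroring the defrag error path
		return nil, "", err
	}
	return db, temp.Name(), nil
}
```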
@@ -43,6 +43,7 @@ func (s *store) scheduleCompaction(compactMainRev int64, keep map[revision]struc
        rev = bytesToRev(key)
        if _, ok := keep[rev]; !ok {
            tx.UnsafeDelete(keyBucketName, key)
+           keyCompactions++
        }
    }

@@ -296,6 +296,14 @@ var (
    // overridden by mvcc initialization
    reportCompactRevMu sync.RWMutex
    reportCompactRev   = func() float64 { return 0 }
+
+   totalPutSizeGauge = prometheus.NewGauge(
+       prometheus.GaugeOpts{
+           Namespace: "etcd_debugging",
+           Subsystem: "mvcc",
+           Name:      "total_put_size_in_bytes",
+           Help:      "The total size of put kv pairs seen by this member.",
+       })
)

func init() {
@@ -325,6 +333,7 @@ func init() {
    prometheus.MustRegister(hashRevSec)
    prometheus.MustRegister(currentRev)
    prometheus.MustRegister(compactRev)
+   prometheus.MustRegister(totalPutSizeGauge)
}

// ReportEventReceived reports that an event is received.
@@ -21,14 +21,15 @@ type metricsTxnWrite struct {
    ranges  uint
    puts    uint
    deletes uint
+   putSize int64
}

func newMetricsTxnRead(tr TxnRead) TxnRead {
-   return &metricsTxnWrite{&txnReadWrite{tr}, 0, 0, 0}
+   return &metricsTxnWrite{&txnReadWrite{tr}, 0, 0, 0, 0}
}

func newMetricsTxnWrite(tw TxnWrite) TxnWrite {
-   return &metricsTxnWrite{tw, 0, 0, 0}
+   return &metricsTxnWrite{tw, 0, 0, 0, 0}
}

func (tw *metricsTxnWrite) Range(key, end []byte, ro RangeOptions) (*RangeResult, error) {
@@ -43,6 +44,8 @@ func (tw *metricsTxnWrite) DeleteRange(key, end []byte) (n, rev int64) {

func (tw *metricsTxnWrite) Put(key, value []byte, lease lease.LeaseID) (rev int64) {
    tw.puts++
+   size := int64(len(key) + len(value))
+   tw.putSize += size
    return tw.TxnWrite.Put(key, value, lease)
}

@@ -60,6 +63,7 @@ func (tw *metricsTxnWrite) End() {
    puts := float64(tw.puts)
    putCounter.Add(puts)
    putCounterDebug.Add(puts) // TODO: remove in 3.5 release
+   totalPutSizeGauge.Add(float64(tw.putSize))

    deletes := float64(tw.deletes)
    deleteCounter.Add(deletes)
@@ -25,13 +25,23 @@ import (
)

func PurgeFile(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) <-chan error {
-   return purgeFile(lg, dirname, suffix, max, interval, stop, nil)
+   return purgeFile(lg, dirname, suffix, max, interval, stop, nil, nil)
}

+func PurgeFileWithDoneNotify(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) (<-chan struct{}, <-chan error) {
+   doneC := make(chan struct{})
+   errC := purgeFile(lg, dirname, suffix, max, interval, stop, nil, doneC)
+   return doneC, errC
+}
+
// purgeFile is the internal implementation for PurgeFile which can post purged files to purgec if non-nil.
-func purgeFile(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}, purgec chan<- string) <-chan error {
+// if donec is non-nil, the function closes it to notify its exit.
+func purgeFile(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}, purgec chan<- string, donec chan<- struct{}) <-chan error {
    errC := make(chan error, 1)
    go func() {
+       if donec != nil {
+           defer close(donec)
+       }
        for {
            fnames, err := ReadDir(dirname)
            if err != nil {
@@ -45,7 +45,7 @@ func TestPurgeFile(t *testing.T) {
    stop, purgec := make(chan struct{}), make(chan string, 10)

    // keep 3 most recent files
-   errch := purgeFile(zap.NewExample(), dir, "test", 3, time.Millisecond, stop, purgec)
+   errch := purgeFile(zap.NewExample(), dir, "test", 3, time.Millisecond, stop, purgec, nil)
    select {
    case f := <-purgec:
        t.Errorf("unexpected purge on %q", f)
@@ -116,7 +116,7 @@ func TestPurgeFileHoldingLockFile(t *testing.T) {
    }

    stop, purgec := make(chan struct{}), make(chan string, 10)
-   errch := purgeFile(zap.NewExample(), dir, "test", 3, time.Millisecond, stop, purgec)
+   errch := purgeFile(zap.NewExample(), dir, "test", 3, time.Millisecond, stop, purgec, nil)

    for i := 0; i < 5; i++ {
        select {
@@ -37,12 +37,6 @@ main() {
        exit 1
    fi

-   KEYID=$(gpg --list-keys --with-colons| awk -F: '/^pub:/ { print $5 }')
-   if [[ -z "${KEYID}" ]]; then
-       echo "Failed to load gpg key. Is gpg set up correctly for etcd releases?"
-       exit 1
-   fi
-
    # Expected umask for etcd release artifacts
    umask 022

@@ -116,6 +110,11 @@ main() {
        echo "Skipping tag step. git tag ${RELEASE_VERSION} already exists."
    else
        echo "Tagging release..."
+       KEYID=$(gpg --list-keys --with-colons| awk -F: '/^pub:/ { print $5 }')
+       if [[ -z "${KEYID}" ]]; then
+           echo "Failed to load gpg key. Is gpg set up correctly for etcd releases?"
+           exit 1
+       fi
        git tag --local-user "${KEYID}" --sign "${RELEASE_VERSION}" --message "${RELEASE_VERSION}"
    fi

@@ -186,21 +186,18 @@ func testV3CurlTxn(cx ctlCtx) {

func testV3CurlAuth(cx ctlCtx) {
    p := cx.apiPrefix
+   usernames := []string{"root", "nonroot", "nooption"}
+   pwds := []string{"toor", "pass", "pass"}
+   options := []*authpb.UserAddOptions{{NoPassword: false}, {NoPassword: false}, nil}

-   // create root user
-   rootuser, err := json.Marshal(&pb.AuthUserAddRequest{Name: string("root"), Password: string("toor"), Options: &authpb.UserAddOptions{NoPassword: false}})
-   testutil.AssertNil(cx.t, err)
+   // create users
+   for i := 0; i < len(usernames); i++ {
+       user, err := json.Marshal(&pb.AuthUserAddRequest{Name: usernames[i], Password: pwds[i], Options: options[i]})
+       testutil.AssertNil(cx.t, err)

-   if err = cURLPost(cx.epc, cURLReq{endpoint: path.Join(p, "/auth/user/add"), value: string(rootuser), expected: "revision"}); err != nil {
-       cx.t.Fatalf("failed testV3CurlAuth add user with curl (%v)", err)
-   }
-
-   // create non root user
-   nonrootuser, err := json.Marshal(&pb.AuthUserAddRequest{Name: string("example.com"), Password: string("example"), Options: &authpb.UserAddOptions{NoPassword: false}})
-   testutil.AssertNil(cx.t, err)
-
-   if err = cURLPost(cx.epc, cURLReq{endpoint: path.Join(p, "/auth/user/add"), value: string(nonrootuser), expected: "revision"}); err != nil {
-       cx.t.Fatalf("failed testV3CurlAuth add user with curl (%v)", err)
+       if err = cURLPost(cx.epc, cURLReq{endpoint: path.Join(p, "/auth/user/add"), value: string(user), expected: "revision"}); err != nil {
+           cx.t.Fatalf("failed testV3CurlAuth add user %v with curl (%v)", usernames[i], err)
+       }
    }

    // create root role
@@ -211,20 +208,14 @@ func testV3CurlAuth(cx ctlCtx) {
        cx.t.Fatalf("failed testV3CurlAuth create role with curl using prefix (%s) (%v)", p, err)
    }

-   // grant root role
-   grantroleroot, err := json.Marshal(&pb.AuthUserGrantRoleRequest{User: string("root"), Role: string("root")})
-   testutil.AssertNil(cx.t, err)
+   //grant root role
+   for i := 0; i < len(usernames); i++ {
+       grantroleroot, err := json.Marshal(&pb.AuthUserGrantRoleRequest{User: usernames[i], Role: "root"})
+       testutil.AssertNil(cx.t, err)

-   if err = cURLPost(cx.epc, cURLReq{endpoint: path.Join(p, "/auth/user/grant"), value: string(grantroleroot), expected: "revision"}); err != nil {
-       cx.t.Fatalf("failed testV3CurlAuth grant role with curl using prefix (%s) (%v)", p, err)
-   }
-
-   // grant non root user root role
-   grantrole, err := json.Marshal(&pb.AuthUserGrantRoleRequest{User: string("example.com"), Role: string("root")})
-   testutil.AssertNil(cx.t, err)
-
-   if err = cURLPost(cx.epc, cURLReq{endpoint: path.Join(p, "/auth/user/grant"), value: string(grantrole), expected: "revision"}); err != nil {
-       cx.t.Fatalf("failed testV3CurlAuth grant role with curl using prefix (%s) (%v)", p, err)
+       if err = cURLPost(cx.epc, cURLReq{endpoint: path.Join(p, "/auth/user/grant"), value: string(grantroleroot), expected: "revision"}); err != nil {
+           cx.t.Fatalf("failed testV3CurlAuth grant role with curl using prefix (%s) (%v)", p, err)
+       }
    }

    // enable auth
@@ -232,45 +223,47 @@ func testV3CurlAuth(cx ctlCtx) {
        cx.t.Fatalf("failed testV3CurlAuth enable auth with curl using prefix (%s) (%v)", p, err)
    }

-   // put "bar" into "foo"
-   putreq, err := json.Marshal(&pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")})
-   testutil.AssertNil(cx.t, err)
+   for i := 0; i < len(usernames); i++ {
+       // put "bar[i]" into "foo[i]"
+       putreq, err := json.Marshal(&pb.PutRequest{Key: []byte(fmt.Sprintf("foo%d", i)), Value: []byte(fmt.Sprintf("bar%d", i))})
+       testutil.AssertNil(cx.t, err)

-   // fail put no auth
-   if err = cURLPost(cx.epc, cURLReq{endpoint: path.Join(p, "/kv/put"), value: string(putreq), expected: "error"}); err != nil {
-       cx.t.Fatalf("failed testV3CurlAuth no auth put with curl using prefix (%s) (%v)", p, err)
-   }
+       // fail put no auth
+       if err = cURLPost(cx.epc, cURLReq{endpoint: path.Join(p, "/kv/put"), value: string(putreq), expected: "error"}); err != nil {
+           cx.t.Fatalf("failed testV3CurlAuth no auth put with curl using prefix (%s) (%v)", p, err)
+       }

-   // auth request
-   authreq, err := json.Marshal(&pb.AuthenticateRequest{Name: string("root"), Password: string("toor")})
-   testutil.AssertNil(cx.t, err)
+       // auth request
+       authreq, err := json.Marshal(&pb.AuthenticateRequest{Name: usernames[i], Password: pwds[i]})
+       testutil.AssertNil(cx.t, err)

-   var (
-       authHeader string
-       cmdArgs    []string
-       lineFunc   = func(txt string) bool { return true }
-   )
+       var (
+           authHeader string
+           cmdArgs    []string
+           lineFunc   = func(txt string) bool { return true }
+       )

-   cmdArgs = cURLPrefixArgs(cx.epc, "POST", cURLReq{endpoint: path.Join(p, "/auth/authenticate"), value: string(authreq)})
-   proc, err := spawnCmd(cmdArgs)
-   testutil.AssertNil(cx.t, err)
+       cmdArgs = cURLPrefixArgs(cx.epc, "POST", cURLReq{endpoint: path.Join(p, "/auth/authenticate"), value: string(authreq)})
+       proc, err := spawnCmd(cmdArgs)
+       testutil.AssertNil(cx.t, err)

-   cURLRes, err := proc.ExpectFunc(lineFunc)
-   testutil.AssertNil(cx.t, err)
+       cURLRes, err := proc.ExpectFunc(lineFunc)
+       testutil.AssertNil(cx.t, err)

-   authRes := make(map[string]interface{})
-   testutil.AssertNil(cx.t, json.Unmarshal([]byte(cURLRes), &authRes))
+       authRes := make(map[string]interface{})
+       testutil.AssertNil(cx.t, json.Unmarshal([]byte(cURLRes), &authRes))

-   token, ok := authRes[rpctypes.TokenFieldNameGRPC].(string)
-   if !ok {
-       cx.t.Fatalf("failed invalid token in authenticate response with curl")
-   }
+       token, ok := authRes[rpctypes.TokenFieldNameGRPC].(string)
+       if !ok {
+           cx.t.Fatalf("failed invalid token in authenticate response with curl using user (%v)", usernames[i])
+       }

-   authHeader = "Authorization: " + token
+       authHeader = "Authorization: " + token

-   // put with auth
-   if err = cURLPost(cx.epc, cURLReq{endpoint: path.Join(p, "/kv/put"), value: string(putreq), header: authHeader, expected: "revision"}); err != nil {
-       cx.t.Fatalf("failed testV3CurlAuth auth put with curl using prefix (%s) (%v)", p, err)
+       // put with auth
+       if err = cURLPost(cx.epc, cURLReq{endpoint: path.Join(p, "/kv/put"), value: string(putreq), header: authHeader, expected: "revision"}); err != nil {
+           cx.t.Fatalf("failed testV3CurlAuth auth put with curl using prefix (%s) and user (%v) (%v)", p, usernames[i], err)
        }
+   }
}

@@ -26,7 +26,7 @@ import (
var (
    // MinClusterVersion is the min cluster version this etcd binary is compatible with.
    MinClusterVersion = "3.0.0"
-   Version           = "3.4.3"
+   Version           = "3.4.4"
    APIVersion        = "unknown"

    // Git SHA Value will be set during build