tests: Use zaptest.NewLogger in tests

dependabot/go_modules/go.uber.org/atomic-1.10.0
Marek Siarkowicz 2022-04-01 11:31:05 +02:00
parent b1610934e3
commit 804fddf921
47 changed files with 408 additions and 434 deletions

View File

@ -20,7 +20,7 @@ import (
"strings"
"testing"
"go.uber.org/zap"
"go.uber.org/zap/zaptest"
)
func TestSetFlagsFromEnv(t *testing.T) {
@ -49,7 +49,7 @@ func TestSetFlagsFromEnv(t *testing.T) {
}
// now read the env and verify flags were updated as expected
err := SetFlagsFromEnv(zap.NewExample(), "ETCD", fs)
err := SetFlagsFromEnv(zaptest.NewLogger(t), "ETCD", fs)
if err != nil {
t.Errorf("err=%v, want nil", err)
}
@ -68,7 +68,7 @@ func TestSetFlagsFromEnvBad(t *testing.T) {
fs := flag.NewFlagSet("testing", flag.ExitOnError)
fs.Int("x", 0, "")
os.Setenv("ETCD_X", "not_a_number")
if err := SetFlagsFromEnv(zap.NewExample(), "ETCD", fs); err == nil {
if err := SetFlagsFromEnv(zaptest.NewLogger(t), "ETCD", fs); err == nil {
t.Errorf("err=nil, want != nil")
}
}
@ -83,7 +83,7 @@ func TestSetFlagsFromEnvParsingError(t *testing.T) {
}
defer os.Unsetenv("ETCD_HEARTBEAT_INTERVAL")
err := SetFlagsFromEnv(zap.NewExample(), "ETCD", fs)
err := SetFlagsFromEnv(zaptest.NewLogger(t), "ETCD", fs)
for _, v := range []string{"invalid syntax", "parse error"} {
if strings.Contains(err.Error(), v) {
err = nil

View File

@ -25,7 +25,7 @@ import (
"testing"
"time"
"go.uber.org/zap"
"go.uber.org/zap/zaptest"
)
func TestResolveTCPAddrs(t *testing.T) {
@ -130,7 +130,7 @@ func TestResolveTCPAddrs(t *testing.T) {
return &net.TCPAddr{IP: net.ParseIP(tt.hostMap[host]), Port: i, Zone: ""}, nil
}
ctx, cancel := context.WithTimeout(context.TODO(), time.Second)
urls, err := resolveTCPAddrs(ctx, zap.NewExample(), tt.urls)
urls, err := resolveTCPAddrs(ctx, zaptest.NewLogger(t), tt.urls)
cancel()
if tt.hasError {
if err == nil {
@ -301,7 +301,7 @@ func TestURLsEqual(t *testing.T) {
}
for i, test := range tests {
result, err := urlsEqual(context.TODO(), zap.NewExample(), test.a, test.b)
result, err := urlsEqual(context.TODO(), zaptest.NewLogger(t), test.a, test.b)
if result != test.expect {
t.Errorf("idx=%d #%d: a:%v b:%v, expected %v but %v", i, test.n, test.a, test.b, test.expect, result)
}
@ -334,7 +334,7 @@ func TestURLStringsEqual(t *testing.T) {
for idx, c := range cases {
t.Logf("TestURLStringsEqual, case #%d", idx)
resolveTCPAddr = c.resolver
result, err := URLStringsEqual(context.TODO(), zap.NewExample(), c.urlsA, c.urlsB)
result, err := URLStringsEqual(context.TODO(), zaptest.NewLogger(t), c.urlsA, c.urlsB)
if !result {
t.Errorf("unexpected result %v", result)
}

View File

@ -22,7 +22,7 @@ import (
"testing"
"time"
"go.uber.org/zap"
"go.uber.org/zap/zaptest"
)
func init() { setDflSignal = func(syscall.Signal) {} }
@ -71,7 +71,7 @@ func TestHandleInterrupts(t *testing.T) {
c := make(chan os.Signal, 2)
signal.Notify(c, sig)
HandleInterrupts(zap.NewExample())
HandleInterrupts(zaptest.NewLogger(t))
syscall.Kill(syscall.Getpid(), sig)
// we should receive the signal once from our own kill and

View File

@ -19,8 +19,7 @@ import (
"go.etcd.io/etcd/api/v3/authpb"
"go.etcd.io/etcd/pkg/v3/adt"
"go.uber.org/zap"
"go.uber.org/zap/zaptest"
)
func TestRangePermission(t *testing.T) {
@ -53,7 +52,7 @@ func TestRangePermission(t *testing.T) {
readPerms.Insert(p, struct{}{})
}
result := checkKeyInterval(zap.NewExample(), &unifiedRangePermissions{readPerms: readPerms}, tt.begin, tt.end, authpb.READ)
result := checkKeyInterval(zaptest.NewLogger(t), &unifiedRangePermissions{readPerms: readPerms}, tt.begin, tt.end, authpb.READ)
if result != tt.want {
t.Errorf("#%d: result=%t, want=%t", i, result, tt.want)
}
@ -94,7 +93,7 @@ func TestKeyPermission(t *testing.T) {
readPerms.Insert(p, struct{}{})
}
result := checkKeyPoint(zap.NewExample(), &unifiedRangePermissions{readPerms: readPerms}, tt.key, authpb.READ)
result := checkKeyPoint(zaptest.NewLogger(t), &unifiedRangePermissions{readPerms: readPerms}, tt.key, authpb.READ)
if result != tt.want {
t.Errorf("#%d: result=%t, want=%t", i, result, tt.want)
}

View File

@ -18,15 +18,15 @@ import (
"context"
"testing"
"go.uber.org/zap"
"go.uber.org/zap/zaptest"
)
// TestSimpleTokenDisabled ensures that TokenProviderSimple behaves correctly when
// disabled.
func TestSimpleTokenDisabled(t *testing.T) {
initialState := newTokenProviderSimple(zap.NewExample(), dummyIndexWaiter, simpleTokenTTLDefault)
initialState := newTokenProviderSimple(zaptest.NewLogger(t), dummyIndexWaiter, simpleTokenTTLDefault)
explicitlyDisabled := newTokenProviderSimple(zap.NewExample(), dummyIndexWaiter, simpleTokenTTLDefault)
explicitlyDisabled := newTokenProviderSimple(zaptest.NewLogger(t), dummyIndexWaiter, simpleTokenTTLDefault)
explicitlyDisabled.enable()
explicitlyDisabled.disable()
@ -48,7 +48,7 @@ func TestSimpleTokenDisabled(t *testing.T) {
// TestSimpleTokenAssign ensures that TokenProviderSimple can correctly assign a
// token, look it up with info, and invalidate it by user.
func TestSimpleTokenAssign(t *testing.T) {
tp := newTokenProviderSimple(zap.NewExample(), dummyIndexWaiter, simpleTokenTTLDefault)
tp := newTokenProviderSimple(zaptest.NewLogger(t), dummyIndexWaiter, simpleTokenTTLDefault)
tp.enable()
defer tp.disable()
ctx := context.WithValue(context.WithValue(context.TODO(), AuthenticateParamIndex{}, uint64(1)), AuthenticateParamSimpleTokenPrefix{}, "dummy")

View File

@ -18,16 +18,17 @@ import (
"context"
"encoding/base64"
"fmt"
"github.com/stretchr/testify/assert"
"strings"
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
"go.uber.org/zap/zaptest"
"go.etcd.io/etcd/api/v3/authpb"
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"go.uber.org/zap"
"golang.org/x/crypto/bcrypt"
"google.golang.org/grpc/metadata"
)
@ -43,12 +44,12 @@ func dummyIndexWaiter(index uint64) <-chan struct{} {
// TestNewAuthStoreRevision ensures newly auth store
// keeps the old revision when there are no changes.
func TestNewAuthStoreRevision(t *testing.T) {
tp, err := NewTokenProvider(zap.NewExample(), tokenTypeSimple, dummyIndexWaiter, simpleTokenTTLDefault)
tp, err := NewTokenProvider(zaptest.NewLogger(t), tokenTypeSimple, dummyIndexWaiter, simpleTokenTTLDefault)
if err != nil {
t.Fatal(err)
}
be := newBackendMock()
as := NewAuthStore(zap.NewExample(), be, tp, bcrypt.MinCost)
as := NewAuthStore(zaptest.NewLogger(t), be, tp, bcrypt.MinCost)
err = enableAuthAndCreateRoot(as)
if err != nil {
t.Fatal(err)
@ -57,7 +58,7 @@ func TestNewAuthStoreRevision(t *testing.T) {
as.Close()
// no changes to commit
as = NewAuthStore(zap.NewExample(), be, tp, bcrypt.MinCost)
as = NewAuthStore(zaptest.NewLogger(t), be, tp, bcrypt.MinCost)
defer as.Close()
new := as.Revision()
@ -68,14 +69,14 @@ func TestNewAuthStoreRevision(t *testing.T) {
// TestNewAuthStoreBcryptCost ensures that NewAuthStore uses default when given bcrypt-cost is invalid
func TestNewAuthStoreBcryptCost(t *testing.T) {
tp, err := NewTokenProvider(zap.NewExample(), tokenTypeSimple, dummyIndexWaiter, simpleTokenTTLDefault)
tp, err := NewTokenProvider(zaptest.NewLogger(t), tokenTypeSimple, dummyIndexWaiter, simpleTokenTTLDefault)
if err != nil {
t.Fatal(err)
}
invalidCosts := [2]int{bcrypt.MinCost - 1, bcrypt.MaxCost + 1}
for _, invalidCost := range invalidCosts {
as := NewAuthStore(zap.NewExample(), newBackendMock(), tp, invalidCost)
as := NewAuthStore(zaptest.NewLogger(t), newBackendMock(), tp, invalidCost)
defer as.Close()
if as.BcryptCost() != bcrypt.DefaultCost {
t.Fatalf("expected DefaultCost when bcryptcost is invalid")
@ -89,11 +90,11 @@ func encodePassword(s string) string {
}
func setupAuthStore(t *testing.T) (store *authStore, teardownfunc func(t *testing.T)) {
tp, err := NewTokenProvider(zap.NewExample(), tokenTypeSimple, dummyIndexWaiter, simpleTokenTTLDefault)
tp, err := NewTokenProvider(zaptest.NewLogger(t), tokenTypeSimple, dummyIndexWaiter, simpleTokenTTLDefault)
if err != nil {
t.Fatal(err)
}
as := NewAuthStore(zap.NewExample(), newBackendMock(), tp, bcrypt.MinCost)
as := NewAuthStore(zaptest.NewLogger(t), newBackendMock(), tp, bcrypt.MinCost)
err = enableAuthAndCreateRoot(as)
if err != nil {
t.Fatal(err)
@ -680,11 +681,11 @@ func TestIsAuthEnabled(t *testing.T) {
// TestAuthRevisionRace ensures that access to authStore.revision is thread-safe.
func TestAuthInfoFromCtxRace(t *testing.T) {
tp, err := NewTokenProvider(zap.NewExample(), tokenTypeSimple, dummyIndexWaiter, simpleTokenTTLDefault)
tp, err := NewTokenProvider(zaptest.NewLogger(t), tokenTypeSimple, dummyIndexWaiter, simpleTokenTTLDefault)
if err != nil {
t.Fatal(err)
}
as := NewAuthStore(zap.NewExample(), newBackendMock(), tp, bcrypt.MinCost)
as := NewAuthStore(zaptest.NewLogger(t), newBackendMock(), tp, bcrypt.MinCost)
defer as.Close()
donec := make(chan struct{})
@ -753,11 +754,11 @@ func TestRecoverFromSnapshot(t *testing.T) {
as.Close()
tp, err := NewTokenProvider(zap.NewExample(), tokenTypeSimple, dummyIndexWaiter, simpleTokenTTLDefault)
tp, err := NewTokenProvider(zaptest.NewLogger(t), tokenTypeSimple, dummyIndexWaiter, simpleTokenTTLDefault)
if err != nil {
t.Fatal(err)
}
as2 := NewAuthStore(zap.NewExample(), as.be, tp, bcrypt.MinCost)
as2 := NewAuthStore(zaptest.NewLogger(t), as.be, tp, bcrypt.MinCost)
defer as2.Close()
if !as2.IsAuthEnabled() {
@ -830,12 +831,12 @@ func TestHammerSimpleAuthenticate(t *testing.T) {
// TestRolesOrder tests authpb.User.Roles is sorted
func TestRolesOrder(t *testing.T) {
tp, err := NewTokenProvider(zap.NewExample(), tokenTypeSimple, dummyIndexWaiter, simpleTokenTTLDefault)
tp, err := NewTokenProvider(zaptest.NewLogger(t), tokenTypeSimple, dummyIndexWaiter, simpleTokenTTLDefault)
defer tp.disable()
if err != nil {
t.Fatal(err)
}
as := NewAuthStore(zap.NewExample(), newBackendMock(), tp, bcrypt.MinCost)
as := NewAuthStore(zaptest.NewLogger(t), newBackendMock(), tp, bcrypt.MinCost)
defer as.Close()
err = enableAuthAndCreateRoot(as)
if err != nil {
@ -884,11 +885,11 @@ func TestAuthInfoFromCtxWithRootJWT(t *testing.T) {
// testAuthInfoFromCtxWithRoot ensures "WithRoot" properly embeds token in the context.
func testAuthInfoFromCtxWithRoot(t *testing.T, opts string) {
tp, err := NewTokenProvider(zap.NewExample(), opts, dummyIndexWaiter, simpleTokenTTLDefault)
tp, err := NewTokenProvider(zaptest.NewLogger(t), opts, dummyIndexWaiter, simpleTokenTTLDefault)
if err != nil {
t.Fatal(err)
}
as := NewAuthStore(zap.NewExample(), newBackendMock(), tp, bcrypt.MinCost)
as := NewAuthStore(zaptest.NewLogger(t), newBackendMock(), tp, bcrypt.MinCost)
defer as.Close()
if err = enableAuthAndCreateRoot(as); err != nil {

View File

@ -19,8 +19,7 @@ import (
"testing"
"go.etcd.io/etcd/client/pkg/v3/types"
"go.uber.org/zap"
"go.uber.org/zap/zaptest"
)
func mustNewURLs(t *testing.T, urls []string) []url.URL {
@ -39,7 +38,7 @@ func TestConfigVerifyBootstrapWithoutClusterAndDiscoveryURLFail(t *testing.T) {
Name: "node1",
DiscoveryURL: "",
InitialPeerURLsMap: types.URLsMap{},
Logger: zap.NewExample(),
Logger: zaptest.NewLogger(t),
}
if err := c.VerifyBootstrap(); err == nil {
t.Errorf("err = nil, want not nil")
@ -57,7 +56,7 @@ func TestConfigVerifyExistingWithDiscoveryURLFail(t *testing.T) {
PeerURLs: mustNewURLs(t, []string{"http://127.0.0.1:2380"}),
InitialPeerURLsMap: cluster,
NewCluster: false,
Logger: zap.NewExample(),
Logger: zaptest.NewLogger(t),
}
if err := c.VerifyJoinExisting(); err == nil {
t.Errorf("err = nil, want not nil")
@ -145,7 +144,7 @@ func TestConfigVerifyLocalMember(t *testing.T) {
cfg := ServerConfig{
Name: "node1",
InitialPeerURLsMap: cluster,
Logger: zap.NewExample(),
Logger: zaptest.NewLogger(t),
}
if tt.apurls != nil {
cfg.PeerURLs = mustNewURLs(t, tt.apurls)
@ -170,7 +169,7 @@ func TestSnapDir(t *testing.T) {
for dd, w := range tests {
cfg := ServerConfig{
DataDir: dd,
Logger: zap.NewExample(),
Logger: zaptest.NewLogger(t),
}
if g := cfg.SnapDir(); g != w {
t.Errorf("DataDir=%q: SnapDir()=%q, want=%q", dd, g, w)
@ -186,7 +185,7 @@ func TestWALDir(t *testing.T) {
for dd, w := range tests {
cfg := ServerConfig{
DataDir: dd,
Logger: zap.NewExample(),
Logger: zaptest.NewLogger(t),
}
if g := cfg.WALDir(); g != w {
t.Errorf("DataDir=%q: WALDir()=%q, want=%q", dd, g, w)
@ -203,7 +202,7 @@ func TestShouldDiscover(t *testing.T) {
for durl, w := range tests {
cfg := ServerConfig{
DiscoveryURL: durl,
Logger: zap.NewExample(),
Logger: zaptest.NewLogger(t),
}
if g := cfg.ShouldDiscover(); g != w {
t.Errorf("durl=%q: ShouldDiscover()=%t, want=%t", durl, g, w)

View File

@ -14,7 +14,7 @@ import (
"go.etcd.io/etcd/client/pkg/v3/types"
"go.etcd.io/etcd/raft/v3"
"go.etcd.io/etcd/server/v3/etcdserver"
"go.uber.org/zap"
"go.uber.org/zap/zaptest"
)
type fakeStats struct{}
@ -98,7 +98,7 @@ func TestHealthHandler(t *testing.T) {
for i, tt := range tests {
func() {
mux := http.NewServeMux()
HandleMetricsHealth(zap.NewExample(), mux, &fakeServerV2{
HandleMetricsHealth(zaptest.NewLogger(t), mux, &fakeServerV2{
fakeServer: fakeServer{alarms: tt.alarms},
health: tt.health,
})

View File

@ -26,7 +26,7 @@ import (
"strings"
"testing"
"go.uber.org/zap"
"go.uber.org/zap/zaptest"
"github.com/coreos/go-semver/semver"
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
@ -85,7 +85,7 @@ var fakeRaftHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Reque
// TestNewPeerHandlerOnRaftPrefix tests that NewPeerHandler returns a handler that
// handles raft-prefix requests well.
func TestNewPeerHandlerOnRaftPrefix(t *testing.T) {
ph := newPeerHandler(zap.NewExample(), &fakeServer{cluster: &fakeCluster{}}, fakeRaftHandler, nil, nil, nil)
ph := newPeerHandler(zaptest.NewLogger(t), &fakeServer{cluster: &fakeCluster{}}, fakeRaftHandler, nil, nil, nil)
srv := httptest.NewServer(ph)
defer srv.Close()
@ -233,7 +233,7 @@ func TestServeMemberPromoteFails(t *testing.T) {
// TestNewPeerHandlerOnMembersPromotePrefix verifies the request with members promote prefix is routed correctly
func TestNewPeerHandlerOnMembersPromotePrefix(t *testing.T) {
ph := newPeerHandler(zap.NewExample(), &fakeServer{cluster: &fakeCluster{}}, fakeRaftHandler, nil, nil, nil)
ph := newPeerHandler(zaptest.NewLogger(t), &fakeServer{cluster: &fakeCluster{}}, fakeRaftHandler, nil, nil, nil)
srv := httptest.NewServer(ph)
defer srv.Close()

View File

@ -28,8 +28,6 @@ import (
"go.etcd.io/etcd/raft/v3/raftpb"
"go.etcd.io/etcd/server/v3/etcdserver/api/v2store"
"go.etcd.io/etcd/server/v3/mock/mockstore"
"go.uber.org/zap"
)
func TestClusterMember(t *testing.T) {
@ -241,7 +239,7 @@ func TestClusterValidateAndAssignIDsBad(t *testing.T) {
for i, tt := range tests {
ecl := newTestCluster(t, tt.clmembs)
lcl := newTestCluster(t, tt.membs)
if err := ValidateClusterAndAssignIDs(zap.NewExample(), lcl, ecl); err == nil {
if err := ValidateClusterAndAssignIDs(zaptest.NewLogger(t), lcl, ecl); err == nil {
t.Errorf("#%d: unexpected update success", i)
}
}
@ -268,7 +266,7 @@ func TestClusterValidateAndAssignIDs(t *testing.T) {
for i, tt := range tests {
lcl := newTestCluster(t, tt.clmembs)
ecl := newTestCluster(t, tt.membs)
if err := ValidateClusterAndAssignIDs(zap.NewExample(), lcl, ecl); err != nil {
if err := ValidateClusterAndAssignIDs(zaptest.NewLogger(t), lcl, ecl); err != nil {
t.Errorf("#%d: unexpect update error: %v", i, err)
}
if !reflect.DeepEqual(lcl.MemberIDs(), tt.wids) {
@ -509,7 +507,7 @@ func TestNodeToMemberBad(t *testing.T) {
}},
}
for i, tt := range tests {
if _, err := nodeToMember(zap.NewExample(), tt); err == nil {
if _, err := nodeToMember(zaptest.NewLogger(t), tt); err == nil {
t.Errorf("#%d: unexpected nil error", i)
}
}
@ -638,7 +636,7 @@ func TestNodeToMember(t *testing.T) {
{Key: "/1234/raftAttributes", Value: stringp(`{"peerURLs":null}`)},
}}
wm := &Member{ID: 0x1234, RaftAttributes: RaftAttributes{}, Attributes: Attributes{Name: "node1"}}
m, err := nodeToMember(zap.NewExample(), n)
m, err := nodeToMember(zaptest.NewLogger(t), n)
if err != nil {
t.Fatalf("unexpected nodeToMember error: %v", err)
}

View File

@ -25,8 +25,7 @@ import (
"go.etcd.io/etcd/raft/v3"
"go.etcd.io/etcd/raft/v3/raftpb"
stats "go.etcd.io/etcd/server/v3/etcdserver/api/v2stats"
"go.uber.org/zap"
"go.uber.org/zap/zaptest"
)
func TestSendMessage(t *testing.T) {
@ -36,7 +35,7 @@ func TestSendMessage(t *testing.T) {
ClusterID: types.ID(1),
Raft: &fakeRaft{},
ServerStats: newServerStats(),
LeaderStats: stats.NewLeaderStats(zap.NewExample(), "1"),
LeaderStats: stats.NewLeaderStats(zaptest.NewLogger(t), "1"),
}
tr.Start()
srv := httptest.NewServer(tr.Handler())
@ -50,7 +49,7 @@ func TestSendMessage(t *testing.T) {
ClusterID: types.ID(1),
Raft: p,
ServerStats: newServerStats(),
LeaderStats: stats.NewLeaderStats(zap.NewExample(), "2"),
LeaderStats: stats.NewLeaderStats(zaptest.NewLogger(t), "2"),
}
tr2.Start()
srv2 := httptest.NewServer(tr2.Handler())
@ -94,7 +93,7 @@ func TestSendMessageWhenStreamIsBroken(t *testing.T) {
ClusterID: types.ID(1),
Raft: &fakeRaft{},
ServerStats: newServerStats(),
LeaderStats: stats.NewLeaderStats(zap.NewExample(), "1"),
LeaderStats: stats.NewLeaderStats(zaptest.NewLogger(t), "1"),
}
tr.Start()
srv := httptest.NewServer(tr.Handler())
@ -108,7 +107,7 @@ func TestSendMessageWhenStreamIsBroken(t *testing.T) {
ClusterID: types.ID(1),
Raft: p,
ServerStats: newServerStats(),
LeaderStats: stats.NewLeaderStats(zap.NewExample(), "2"),
LeaderStats: stats.NewLeaderStats(zaptest.NewLogger(t), "2"),
}
tr2.Start()
srv2 := httptest.NewServer(tr2.Handler())

View File

@ -31,8 +31,7 @@ import (
"go.etcd.io/etcd/pkg/v3/pbutil"
"go.etcd.io/etcd/raft/v3/raftpb"
"go.etcd.io/etcd/server/v3/etcdserver/api/snap"
"go.uber.org/zap"
"go.uber.org/zap/zaptest"
)
func TestServeRaftPrefix(t *testing.T) {
@ -153,7 +152,7 @@ func TestServeRaftPrefix(t *testing.T) {
req.Header.Set("X-Etcd-Cluster-ID", tt.clusterID)
req.Header.Set("X-Server-Version", version.Version)
rw := httptest.NewRecorder()
h := newPipelineHandler(&Transport{Logger: zap.NewExample()}, tt.p, types.ID(0))
h := newPipelineHandler(&Transport{Logger: zaptest.NewLogger(t)}, tt.p, types.ID(0))
// goroutine because the handler panics to disconnect on raft error
donec := make(chan struct{})

View File

@ -28,8 +28,7 @@ import (
"go.etcd.io/etcd/client/pkg/v3/types"
"go.etcd.io/etcd/raft/v3/raftpb"
stats "go.etcd.io/etcd/server/v3/etcdserver/api/v2stats"
"go.uber.org/zap"
"go.uber.org/zap/zaptest"
)
// TestPipelineSend tests that pipeline could send data using roundtripper
@ -38,7 +37,7 @@ func TestPipelineSend(t *testing.T) {
tr := &roundTripperRecorder{rec: testutil.NewRecorderStream()}
picker := mustNewURLPicker(t, []string{"http://localhost:2380"})
tp := &Transport{pipelineRt: tr}
p := startTestPipeline(tp, picker)
p := startTestPipeline(t, tp, picker)
p.msgc <- raftpb.Message{Type: raftpb.MsgApp}
tr.rec.Wait(1)
@ -54,7 +53,7 @@ func TestPipelineKeepSendingWhenPostError(t *testing.T) {
tr := &respRoundTripper{rec: testutil.NewRecorderStream(), err: fmt.Errorf("roundtrip error")}
picker := mustNewURLPicker(t, []string{"http://localhost:2380"})
tp := &Transport{pipelineRt: tr}
p := startTestPipeline(tp, picker)
p := startTestPipeline(t, tp, picker)
defer p.stop()
for i := 0; i < 50; i++ {
@ -71,7 +70,7 @@ func TestPipelineExceedMaximumServing(t *testing.T) {
rt := newRoundTripperBlocker()
picker := mustNewURLPicker(t, []string{"http://localhost:2380"})
tp := &Transport{pipelineRt: rt}
p := startTestPipeline(tp, picker)
p := startTestPipeline(t, tp, picker)
defer p.stop()
// keep the sender busy and make the buffer full
@ -109,7 +108,7 @@ func TestPipelineSendFailed(t *testing.T) {
rt := newRespRoundTripper(0, errors.New("blah"))
rt.rec = testutil.NewRecorderStream()
tp := &Transport{pipelineRt: rt}
p := startTestPipeline(tp, picker)
p := startTestPipeline(t, tp, picker)
p.msgc <- raftpb.Message{Type: raftpb.MsgApp}
if _, err := rt.rec.Wait(1); err != nil {
@ -127,7 +126,7 @@ func TestPipelinePost(t *testing.T) {
tr := &roundTripperRecorder{rec: &testutil.RecorderBuffered{}}
picker := mustNewURLPicker(t, []string{"http://localhost:2380"})
tp := &Transport{ClusterID: types.ID(1), pipelineRt: tr}
p := startTestPipeline(tp, picker)
p := startTestPipeline(t, tp, picker)
if err := p.post([]byte("some data")); err != nil {
t.Fatalf("unexpected post error: %v", err)
}
@ -181,7 +180,7 @@ func TestPipelinePostBad(t *testing.T) {
for i, tt := range tests {
picker := mustNewURLPicker(t, []string{tt.u})
tp := &Transport{pipelineRt: newRespRoundTripper(tt.code, tt.err)}
p := startTestPipeline(tp, picker)
p := startTestPipeline(t, tp, picker)
err := p.post([]byte("some data"))
p.stop()
@ -202,7 +201,7 @@ func TestPipelinePostErrorc(t *testing.T) {
for i, tt := range tests {
picker := mustNewURLPicker(t, []string{tt.u})
tp := &Transport{pipelineRt: newRespRoundTripper(tt.code, tt.err)}
p := startTestPipeline(tp, picker)
p := startTestPipeline(t, tp, picker)
p.post([]byte("some data"))
p.stop()
select {
@ -216,7 +215,7 @@ func TestPipelinePostErrorc(t *testing.T) {
func TestStopBlockedPipeline(t *testing.T) {
picker := mustNewURLPicker(t, []string{"http://localhost:2380"})
tp := &Transport{pipelineRt: newRoundTripperBlocker()}
p := startTestPipeline(tp, picker)
p := startTestPipeline(t, tp, picker)
// send many messages that most of them will be blocked in buffer
for i := 0; i < connPerPipeline*10; i++ {
p.msgc <- raftpb.Message{}
@ -297,12 +296,12 @@ type nopReadCloser struct{}
func (n *nopReadCloser) Read(p []byte) (int, error) { return 0, io.EOF }
func (n *nopReadCloser) Close() error { return nil }
func startTestPipeline(tr *Transport, picker *urlPicker) *pipeline {
func startTestPipeline(t *testing.T, tr *Transport, picker *urlPicker) *pipeline {
p := &pipeline{
peerID: types.ID(1),
tr: tr,
picker: picker,
status: newPeerStatus(zap.NewExample(), tr.ID, types.ID(1)),
status: newPeerStatus(zaptest.NewLogger(t), tr.ID, types.ID(1)),
raft: &fakeRaft{},
followerStats: &stats.FollowerStats{},
errorc: make(chan error, 1),

View File

@ -27,8 +27,7 @@ import (
"go.etcd.io/etcd/client/pkg/v3/types"
"go.etcd.io/etcd/raft/v3/raftpb"
"go.etcd.io/etcd/server/v3/etcdserver/api/snap"
"go.uber.org/zap"
"go.uber.org/zap/zaptest"
)
type strReaderCloser struct{ *strings.Reader }
@ -99,12 +98,12 @@ func testSnapshotSend(t *testing.T, sm *snap.Message) (bool, []os.DirEntry) {
r := &fakeRaft{}
tr := &Transport{pipelineRt: &http.Transport{}, ClusterID: types.ID(1), Raft: r}
ch := make(chan struct{}, 1)
h := &syncHandler{newSnapshotHandler(tr, r, snap.New(zap.NewExample(), d), types.ID(1)), ch}
h := &syncHandler{newSnapshotHandler(tr, r, snap.New(zaptest.NewLogger(t), d), types.ID(1)), ch}
srv := httptest.NewServer(h)
defer srv.Close()
picker := mustNewURLPicker(t, []string{srv.URL})
snapsend := newSnapshotSender(tr, picker, types.ID(1), newPeerStatus(zap.NewExample(), types.ID(0), types.ID(1)))
snapsend := newSnapshotSender(tr, picker, types.ID(1), newPeerStatus(zaptest.NewLogger(t), types.ID(0), types.ID(1)))
defer snapsend.stop()
snapsend.send(*sm)

View File

@ -31,9 +31,9 @@ import (
"go.etcd.io/etcd/client/pkg/v3/types"
"go.etcd.io/etcd/raft/v3/raftpb"
stats "go.etcd.io/etcd/server/v3/etcdserver/api/v2stats"
"go.uber.org/zap/zaptest"
"github.com/coreos/go-semver/semver"
"go.uber.org/zap"
"golang.org/x/time/rate"
)
@ -41,7 +41,7 @@ import (
// to streamWriter. After that, streamWriter can use it to send messages
// continuously, and closes it when stopped.
func TestStreamWriterAttachOutgoingConn(t *testing.T) {
sw := startStreamWriter(zap.NewExample(), types.ID(0), types.ID(1), newPeerStatus(zap.NewExample(), types.ID(0), types.ID(1)), &stats.FollowerStats{}, &fakeRaft{})
sw := startStreamWriter(zaptest.NewLogger(t), types.ID(0), types.ID(1), newPeerStatus(zaptest.NewLogger(t), types.ID(0), types.ID(1)), &stats.FollowerStats{}, &fakeRaft{})
// the expected initial state of streamWriter is not working
if _, ok := sw.writec(); ok {
t.Errorf("initial working status = %v, want false", ok)
@ -93,7 +93,7 @@ func TestStreamWriterAttachOutgoingConn(t *testing.T) {
// TestStreamWriterAttachBadOutgoingConn tests that streamWriter with bad
// outgoingConn will close the outgoingConn and fall back to non-working status.
func TestStreamWriterAttachBadOutgoingConn(t *testing.T) {
sw := startStreamWriter(zap.NewExample(), types.ID(0), types.ID(1), newPeerStatus(zap.NewExample(), types.ID(0), types.ID(1)), &stats.FollowerStats{}, &fakeRaft{})
sw := startStreamWriter(zaptest.NewLogger(t), types.ID(0), types.ID(1), newPeerStatus(zaptest.NewLogger(t), types.ID(0), types.ID(1)), &stats.FollowerStats{}, &fakeRaft{})
defer sw.stop()
wfc := newFakeWriteFlushCloser(errors.New("blah"))
sw.attach(&outgoingConn{t: streamTypeMessage, Writer: wfc, Flusher: wfc, Closer: wfc})
@ -127,7 +127,7 @@ func TestStreamReaderDialRequest(t *testing.T) {
}
req := act[0].Params[0].(*http.Request)
wurl := fmt.Sprintf("http://localhost:2380" + tt.endpoint(zap.NewExample()) + "/1")
wurl := fmt.Sprintf("http://localhost:2380" + tt.endpoint(zaptest.NewLogger(t)) + "/1")
if req.URL.String() != wurl {
t.Errorf("#%d: url = %s, want %s", i, req.URL.String(), wurl)
}
@ -197,7 +197,7 @@ func TestStreamReaderStopOnDial(t *testing.T) {
picker: mustNewURLPicker(t, []string{"http://localhost:2380"}),
errorc: make(chan error, 1),
typ: streamTypeMessage,
status: newPeerStatus(zap.NewExample(), types.ID(1), types.ID(2)),
status: newPeerStatus(zaptest.NewLogger(t), types.ID(1), types.ID(2)),
rl: rate.NewLimiter(rate.Every(100*time.Millisecond), 1),
}
tr.onResp = func() {
@ -304,7 +304,7 @@ func TestStream(t *testing.T) {
srv := httptest.NewServer(h)
defer srv.Close()
sw := startStreamWriter(zap.NewExample(), types.ID(0), types.ID(1), newPeerStatus(zap.NewExample(), types.ID(0), types.ID(1)), &stats.FollowerStats{}, &fakeRaft{})
sw := startStreamWriter(zaptest.NewLogger(t), types.ID(0), types.ID(1), newPeerStatus(zaptest.NewLogger(t), types.ID(0), types.ID(1)), &stats.FollowerStats{}, &fakeRaft{})
defer sw.stop()
h.sw = sw
@ -316,7 +316,7 @@ func TestStream(t *testing.T) {
typ: tt.t,
tr: tr,
picker: picker,
status: newPeerStatus(zap.NewExample(), types.ID(0), types.ID(2)),
status: newPeerStatus(zaptest.NewLogger(t), types.ID(0), types.ID(2)),
recvc: recvc,
propc: propc,
rl: rate.NewLimiter(rate.Every(100*time.Millisecond), 1),

View File

@ -25,8 +25,7 @@ import (
"go.etcd.io/etcd/raft/v3"
"go.etcd.io/etcd/raft/v3/raftpb"
stats "go.etcd.io/etcd/server/v3/etcdserver/api/v2stats"
"go.uber.org/zap"
"go.uber.org/zap/zaptest"
)
func BenchmarkSendingMsgApp(b *testing.B) {
@ -36,7 +35,7 @@ func BenchmarkSendingMsgApp(b *testing.B) {
ClusterID: types.ID(1),
Raft: &fakeRaft{},
ServerStats: newServerStats(),
LeaderStats: stats.NewLeaderStats(zap.NewExample(), "1"),
LeaderStats: stats.NewLeaderStats(zaptest.NewLogger(b), "1"),
}
tr.Start()
srv := httptest.NewServer(tr.Handler())
@ -49,7 +48,7 @@ func BenchmarkSendingMsgApp(b *testing.B) {
ClusterID: types.ID(1),
Raft: r,
ServerStats: newServerStats(),
LeaderStats: stats.NewLeaderStats(zap.NewExample(), "2"),
LeaderStats: stats.NewLeaderStats(zaptest.NewLogger(b), "2"),
}
tr2.Start()
srv2 := httptest.NewServer(tr2.Handler())

View File

@ -24,9 +24,9 @@ import (
"go.etcd.io/etcd/client/pkg/v3/types"
"go.etcd.io/etcd/raft/v3/raftpb"
stats "go.etcd.io/etcd/server/v3/etcdserver/api/v2stats"
"go.uber.org/zap/zaptest"
"github.com/xiang90/probing"
"go.uber.org/zap"
)
// TestTransportSend tests that transport can send messages using correct
@ -96,7 +96,7 @@ func TestTransportCutMend(t *testing.T) {
}
func TestTransportAdd(t *testing.T) {
ls := stats.NewLeaderStats(zap.NewExample(), "")
ls := stats.NewLeaderStats(zaptest.NewLogger(t), "")
tr := &Transport{
LeaderStats: ls,
streamRt: &roundTripperRecorder{},
@ -127,7 +127,7 @@ func TestTransportAdd(t *testing.T) {
func TestTransportRemove(t *testing.T) {
tr := &Transport{
LeaderStats: stats.NewLeaderStats(zap.NewExample(), ""),
LeaderStats: stats.NewLeaderStats(zaptest.NewLogger(t), ""),
streamRt: &roundTripperRecorder{},
peers: make(map[types.ID]Peer),
pipelineProber: probing.NewProber(nil),
@ -161,7 +161,7 @@ func TestTransportErrorc(t *testing.T) {
errorc := make(chan error, 1)
tr := &Transport{
Raft: &fakeRaft{},
LeaderStats: stats.NewLeaderStats(zap.NewExample(), ""),
LeaderStats: stats.NewLeaderStats(zaptest.NewLogger(t), ""),
ErrorC: errorc,
streamRt: newRespRoundTripper(http.StatusForbidden, nil),
pipelineRt: newRespRoundTripper(http.StatusForbidden, nil),

View File

@ -25,7 +25,7 @@ import (
"go.etcd.io/etcd/client/pkg/v3/fileutil"
"go.etcd.io/etcd/raft/v3/raftpb"
"go.etcd.io/etcd/server/v3/storage/wal/walpb"
"go.uber.org/zap"
"go.uber.org/zap/zaptest"
)
var testSnap = &raftpb.Snapshot{
@ -46,7 +46,7 @@ func TestSaveAndLoad(t *testing.T) {
t.Fatal(err)
}
defer os.RemoveAll(dir)
ss := New(zap.NewExample(), dir)
ss := New(zaptest.NewLogger(t), dir)
err = ss.save(testSnap)
if err != nil {
t.Fatal(err)
@ -68,7 +68,7 @@ func TestBadCRC(t *testing.T) {
t.Fatal(err)
}
defer os.RemoveAll(dir)
ss := New(zap.NewExample(), dir)
ss := New(zaptest.NewLogger(t), dir)
err = ss.save(testSnap)
if err != nil {
t.Fatal(err)
@ -78,7 +78,7 @@ func TestBadCRC(t *testing.T) {
// fake a crc mismatch
crcTable = crc32.MakeTable(crc32.Koopman)
_, err = Read(zap.NewExample(), filepath.Join(dir, fmt.Sprintf("%016x-%016x.snap", 1, 1)))
_, err = Read(zaptest.NewLogger(t), filepath.Join(dir, fmt.Sprintf("%016x-%016x.snap", 1, 1)))
if err == nil || err != ErrCRCMismatch {
t.Errorf("err = %v, want %v", err, ErrCRCMismatch)
}
@ -98,7 +98,7 @@ func TestFailback(t *testing.T) {
t.Fatal(err)
}
ss := New(zap.NewExample(), dir)
ss := New(zaptest.NewLogger(t), dir)
err = ss.save(testSnap)
if err != nil {
t.Fatal(err)
@ -133,7 +133,7 @@ func TestSnapNames(t *testing.T) {
f.Close()
}
}
ss := New(zap.NewExample(), dir)
ss := New(zaptest.NewLogger(t), dir)
names, err := ss.snapNames()
if err != nil {
t.Errorf("err = %v, want nil", err)
@ -154,7 +154,7 @@ func TestLoadNewestSnap(t *testing.T) {
t.Fatal(err)
}
defer os.RemoveAll(dir)
ss := New(zap.NewExample(), dir)
ss := New(zaptest.NewLogger(t), dir)
err = ss.save(testSnap)
if err != nil {
t.Fatal(err)
@ -218,7 +218,7 @@ func TestNoSnapshot(t *testing.T) {
t.Fatal(err)
}
defer os.RemoveAll(dir)
ss := New(zap.NewExample(), dir)
ss := New(zaptest.NewLogger(t), dir)
_, err = ss.Load()
if err != ErrNoSnapshot {
t.Errorf("err = %v, want %v", err, ErrNoSnapshot)
@ -238,7 +238,7 @@ func TestEmptySnapshot(t *testing.T) {
t.Fatal(err)
}
_, err = Read(zap.NewExample(), filepath.Join(dir, "1.snap"))
_, err = Read(zaptest.NewLogger(t), filepath.Join(dir, "1.snap"))
if err != ErrEmptySnapshot {
t.Errorf("err = %v, want %v", err, ErrEmptySnapshot)
}
@ -259,7 +259,7 @@ func TestAllSnapshotBroken(t *testing.T) {
t.Fatal(err)
}
ss := New(zap.NewExample(), dir)
ss := New(zaptest.NewLogger(t), dir)
_, err = ss.Load()
if err != ErrNoSnapshot {
t.Errorf("err = %v, want %v", err, ErrNoSnapshot)
@ -282,7 +282,7 @@ func TestReleaseSnapDBs(t *testing.T) {
}
}
ss := New(zap.NewExample(), dir)
ss := New(zaptest.NewLogger(t), dir)
if err := ss.ReleaseSnapDBs(raftpb.Snapshot{Metadata: raftpb.SnapshotMetadata{Index: 300}}); err != nil {
t.Fatal(err)

View File

@ -29,9 +29,9 @@ import (
"go.etcd.io/etcd/client/pkg/v3/types"
"go.etcd.io/etcd/client/v2"
"go.uber.org/zap/zaptest"
"github.com/jonboulle/clockwork"
"go.uber.org/zap"
)
const (
@ -39,7 +39,7 @@ const (
)
func TestNewProxyFuncUnset(t *testing.T) {
pf, err := newProxyFunc(zap.NewExample(), "")
pf, err := newProxyFunc(zaptest.NewLogger(t), "")
if pf != nil {
t.Fatal("unexpected non-nil proxyFunc")
}
@ -54,7 +54,7 @@ func TestNewProxyFuncBad(t *testing.T) {
"http://foo.com/%1",
}
for i, in := range tests {
pf, err := newProxyFunc(zap.NewExample(), in)
pf, err := newProxyFunc(zaptest.NewLogger(t), in)
if pf != nil {
t.Errorf("#%d: unexpected non-nil proxyFunc", i)
}
@ -70,7 +70,7 @@ func TestNewProxyFunc(t *testing.T) {
"http://disco.foo.bar": "http://disco.foo.bar",
}
for in, w := range tests {
pf, err := newProxyFunc(zap.NewExample(), in)
pf, err := newProxyFunc(zaptest.NewLogger(t), in)
if pf == nil {
t.Errorf("%s: unexpected nil proxyFunc", in)
continue
@ -197,12 +197,12 @@ func TestCheckCluster(t *testing.T) {
})
}
c := &clientWithResp{rs: rs}
dBase := newTestDiscovery(cluster, 1, c)
dBase := newTestDiscovery(t, cluster, 1, c)
cRetry := &clientWithRetry{failTimes: 3}
cRetry.rs = rs
fc := clockwork.NewFakeClock()
dRetry := newTestDiscoveryWithClock(cluster, 1, cRetry, fc)
dRetry := newTestDiscoveryWithClock(t, cluster, 1, cRetry, fc)
for _, d := range []*discovery{dBase, dRetry} {
go func() {
@ -267,7 +267,7 @@ func TestWaitNodes(t *testing.T) {
for i, tt := range tests {
// Basic case
c := &clientWithResp{rs: nil, w: &watcherWithResp{rs: tt.rs}}
dBase := newTestDiscovery("1000", 1, c)
dBase := newTestDiscovery(t, "1000", 1, c)
// Retry case
var retryScanResp []*client.Response
@ -289,7 +289,7 @@ func TestWaitNodes(t *testing.T) {
w: &watcherWithRetry{rs: tt.rs, failTimes: 2},
}
fc := clockwork.NewFakeClock()
dRetry := newTestDiscoveryWithClock("1000", 1, cRetry, fc)
dRetry := newTestDiscoveryWithClock(t, "1000", 1, cRetry, fc)
for _, d := range []*discovery{dBase, dRetry} {
go func() {
@ -335,7 +335,7 @@ func TestCreateSelf(t *testing.T) {
}
for i, tt := range tests {
d := newTestDiscovery("1000", 1, tt.c)
d := newTestDiscovery(t, "1000", 1, tt.c)
if err := d.createSelf(""); err != tt.werr {
t.Errorf("#%d: err = %v, want %v", i, err, nil)
}
@ -428,7 +428,7 @@ func TestRetryFailure(t *testing.T) {
cluster := "1000"
c := &clientWithRetry{failTimes: 4}
fc := clockwork.NewFakeClock()
d := newTestDiscoveryWithClock(cluster, 1, c, fc)
d := newTestDiscoveryWithClock(t, cluster, 1, c, fc)
go func() {
for i := uint(1); i <= maxRetryInTest; i++ {
fc.BlockUntil(1)
@ -551,9 +551,9 @@ func (w *watcherWithRetry) Next(context.Context) (*client.Response, error) {
return r, nil
}
func newTestDiscovery(cluster string, id types.ID, c client.KeysAPI) *discovery {
func newTestDiscovery(t *testing.T, cluster string, id types.ID, c client.KeysAPI) *discovery {
return &discovery{
lg: zap.NewExample(),
lg: zaptest.NewLogger(t),
cluster: cluster,
id: id,
c: c,
@ -561,9 +561,9 @@ func newTestDiscovery(cluster string, id types.ID, c client.KeysAPI) *discovery
}
}
func newTestDiscoveryWithClock(cluster string, id types.ID, c client.KeysAPI, clock clockwork.Clock) *discovery {
func newTestDiscoveryWithClock(t *testing.T, cluster string, id types.ID, c client.KeysAPI, clock clockwork.Clock) *discovery {
return &discovery{
lg: zap.NewExample(),
lg: zaptest.NewLogger(t),
cluster: cluster,
id: id,
c: c,

View File

@ -21,9 +21,9 @@ import (
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/client/pkg/v3/testutil"
"go.uber.org/zap/zaptest"
"github.com/jonboulle/clockwork"
"go.uber.org/zap"
)
func TestPeriodicHourly(t *testing.T) {
@ -34,7 +34,7 @@ func TestPeriodicHourly(t *testing.T) {
// TODO: Do not depand or real time (Recorder.Wait) in unit tests.
rg := &fakeRevGetter{testutil.NewRecorderStreamWithWaitTimout(10 * time.Millisecond), 0}
compactable := &fakeCompactable{testutil.NewRecorderStreamWithWaitTimout(10 * time.Millisecond)}
tb := newPeriodic(zap.NewExample(), fc, retentionDuration, rg, compactable)
tb := newPeriodic(zaptest.NewLogger(t), fc, retentionDuration, rg, compactable)
tb.Run()
defer tb.Stop()
@ -85,7 +85,7 @@ func TestPeriodicMinutes(t *testing.T) {
fc := clockwork.NewFakeClock()
rg := &fakeRevGetter{testutil.NewRecorderStreamWithWaitTimout(10 * time.Millisecond), 0}
compactable := &fakeCompactable{testutil.NewRecorderStreamWithWaitTimout(10 * time.Millisecond)}
tb := newPeriodic(zap.NewExample(), fc, retentionDuration, rg, compactable)
tb := newPeriodic(zaptest.NewLogger(t), fc, retentionDuration, rg, compactable)
tb.Run()
defer tb.Stop()
@ -133,7 +133,7 @@ func TestPeriodicPause(t *testing.T) {
retentionDuration := time.Hour
rg := &fakeRevGetter{testutil.NewRecorderStreamWithWaitTimout(10 * time.Millisecond), 0}
compactable := &fakeCompactable{testutil.NewRecorderStreamWithWaitTimout(10 * time.Millisecond)}
tb := newPeriodic(zap.NewExample(), fc, retentionDuration, rg, compactable)
tb := newPeriodic(zaptest.NewLogger(t), fc, retentionDuration, rg, compactable)
tb.Run()
tb.Pause()

View File

@ -21,16 +21,16 @@ import (
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/client/pkg/v3/testutil"
"go.uber.org/zap/zaptest"
"github.com/jonboulle/clockwork"
"go.uber.org/zap"
)
func TestRevision(t *testing.T) {
fc := clockwork.NewFakeClock()
rg := &fakeRevGetter{testutil.NewRecorderStreamWithWaitTimout(10 * time.Millisecond), 0}
compactable := &fakeCompactable{testutil.NewRecorderStreamWithWaitTimout(10 * time.Millisecond)}
tb := newRevision(zap.NewExample(), fc, 10, rg, compactable)
tb := newRevision(zaptest.NewLogger(t), fc, 10, rg, compactable)
tb.Run()
defer tb.Stop()
@ -73,7 +73,7 @@ func TestRevisionPause(t *testing.T) {
fc := clockwork.NewFakeClock()
rg := &fakeRevGetter{testutil.NewRecorderStream(), 99} // will be 100
compactable := &fakeCompactable{testutil.NewRecorderStream()}
tb := newRevision(zap.NewExample(), fc, 10, rg, compactable)
tb := newRevision(zaptest.NewLogger(t), fc, 10, rg, compactable)
tb.Run()
tb.Pause()

View File

@ -10,9 +10,9 @@ import (
"go.etcd.io/etcd/api/v3/mvccpb"
"go.etcd.io/etcd/client/pkg/v3/types"
"go.etcd.io/etcd/client/v3"
"go.uber.org/zap/zaptest"
"github.com/jonboulle/clockwork"
"go.uber.org/zap"
)
// fakeKVForClusterSize is used to test getClusterSize.
@ -62,12 +62,9 @@ func TestGetClusterSize(t *testing.T) {
},
}
lg, err := zap.NewProduction()
if err != nil {
t.Errorf("Failed to create a logger, error: %v", err)
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
lg := zaptest.NewLogger(t)
d := &discovery{
lg: lg,
c: &clientv3.Client{
@ -178,10 +175,7 @@ func TestGetClusterMembers(t *testing.T) {
},
}
lg, err := zap.NewProduction()
if err != nil {
t.Errorf("Failed to create a logger, error: %v", err)
}
lg := zaptest.NewLogger(t)
d := &discovery{
lg: lg,
@ -356,10 +350,7 @@ func TestCheckCluster(t *testing.T) {
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
lg, err := zap.NewProduction()
if err != nil {
t.Errorf("Failed to create a logger, error: %v", err)
}
lg := zaptest.NewLogger(t)
fkv := &fakeKVForCheckCluster{
fakeBaseKV: &fakeBaseKV{},
@ -469,13 +460,9 @@ func TestRegisterSelf(t *testing.T) {
},
}
lg, err := zap.NewProduction()
if err != nil {
t.Errorf("Failed to create a logger, error: %v", err)
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
lg := zaptest.NewLogger(t)
fkv := &fakeKVForRegisterSelf{
fakeBaseKV: &fakeBaseKV{},
t: t,
@ -597,10 +584,7 @@ func TestWaitPeers(t *testing.T) {
},
}
lg, err := zap.NewProduction()
if err != nil {
t.Errorf("Failed to create a logger, error: %v", err)
}
lg := zaptest.NewLogger(t)
d := &discovery{
lg: lg,

View File

@ -20,11 +20,6 @@ package etcdserver
import (
"encoding/json"
"fmt"
bolt "go.etcd.io/bbolt"
"go.etcd.io/etcd/server/v3/storage/datadir"
"go.etcd.io/etcd/server/v3/storage/schema"
"go.etcd.io/etcd/server/v3/storage/wal"
"go.etcd.io/etcd/server/v3/storage/wal/walpb"
"io"
"net/http"
"os"
@ -32,6 +27,13 @@ import (
"strings"
"testing"
bolt "go.etcd.io/bbolt"
"go.etcd.io/etcd/server/v3/storage/datadir"
"go.etcd.io/etcd/server/v3/storage/schema"
"go.etcd.io/etcd/server/v3/storage/wal"
"go.etcd.io/etcd/server/v3/storage/wal/walpb"
"go.uber.org/zap/zaptest"
"go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/api/v3/version"
"go.etcd.io/etcd/client/pkg/v3/types"
@ -41,7 +43,6 @@ import (
"go.etcd.io/etcd/server/v3/etcdserver/api/snap"
"go.etcd.io/etcd/server/v3/etcdserver/api/v2store"
serverstorage "go.etcd.io/etcd/server/v3/storage"
"go.uber.org/zap"
)
func TestBootstrapExistingClusterNoWALMaxLearner(t *testing.T) {
@ -95,7 +96,7 @@ func TestBootstrapExistingClusterNoWALMaxLearner(t *testing.T) {
cfg := config.ServerConfig{
Name: "node0",
InitialPeerURLsMap: cluster,
Logger: zap.NewExample(),
Logger: zaptest.NewLogger(t),
ExperimentalMaxLearners: tt.maxLearner,
}
_, err = bootstrapExistingClusterNoWAL(cfg, mockBootstrapRoundTrip(tt.members))
@ -184,7 +185,7 @@ func TestBootstrapBackend(t *testing.T) {
Name: "demoNode",
DataDir: dataDir,
BackendFreelistType: bolt.FreelistArrayType,
Logger: zap.NewExample(),
Logger: zaptest.NewLogger(t),
}
if tt.prepareData != nil {

View File

@ -19,13 +19,11 @@ import (
"go.etcd.io/etcd/api/v3/version"
"go.etcd.io/etcd/client/pkg/v3/types"
"go.uber.org/zap/zaptest"
"github.com/coreos/go-semver/semver"
"go.uber.org/zap"
)
var testLogger = zap.NewExample()
func TestIsCompatibleWithVers(t *testing.T) {
tests := []struct {
vers map[string]*version.Versions
@ -90,7 +88,7 @@ func TestIsCompatibleWithVers(t *testing.T) {
}
for i, tt := range tests {
ok := isCompatibleWithVers(testLogger, tt.vers, tt.local, tt.minV, tt.maxV)
ok := isCompatibleWithVers(zaptest.NewLogger(t), tt.vers, tt.local, tt.minV, tt.maxV)
if ok != tt.wok {
t.Errorf("#%d: ok = %+v, want %+v", i, ok, tt.wok)
}

View File

@ -29,7 +29,7 @@ import (
"go.etcd.io/etcd/server/v3/etcdserver/api/membership"
"go.etcd.io/etcd/server/v3/mock/mockstorage"
serverstorage "go.etcd.io/etcd/server/v3/storage"
"go.uber.org/zap"
"go.uber.org/zap/zaptest"
)
func TestGetIDs(t *testing.T) {
@ -67,7 +67,7 @@ func TestGetIDs(t *testing.T) {
if tt.confState != nil {
snap.Metadata.ConfState = *tt.confState
}
idSet := serverstorage.GetEffectiveNodeIDsFromWalEntries(testLogger, &snap, tt.ents)
idSet := serverstorage.GetEffectiveNodeIDsFromWalEntries(zaptest.NewLogger(t), &snap, tt.ents)
if !reflect.DeepEqual(idSet, tt.widSet) {
t.Errorf("#%d: idset = %#v, want %#v", i, idSet, tt.widSet)
}
@ -147,7 +147,7 @@ func TestCreateConfigChangeEnts(t *testing.T) {
}
for i, tt := range tests {
gents := serverstorage.CreateConfigChangeEnts(testLogger, tt.ids, tt.self, tt.term, tt.index)
gents := serverstorage.CreateConfigChangeEnts(zaptest.NewLogger(t), tt.ids, tt.self, tt.term, tt.index)
if !reflect.DeepEqual(gents, tt.wents) {
t.Errorf("#%d: ents = %v, want %v", i, gents, tt.wents)
}
@ -157,13 +157,13 @@ func TestCreateConfigChangeEnts(t *testing.T) {
func TestStopRaftWhenWaitingForApplyDone(t *testing.T) {
n := newNopReadyNode()
r := newRaftNode(raftNodeConfig{
lg: zap.NewExample(),
lg: zaptest.NewLogger(t),
Node: n,
storage: mockstorage.NewStorageRecorder(""),
raftStorage: raft.NewMemoryStorage(),
transport: newNopTransporter(),
})
srv := &EtcdServer{lgMu: new(sync.RWMutex), lg: zap.NewExample(), r: *r}
srv := &EtcdServer{lgMu: new(sync.RWMutex), lg: zaptest.NewLogger(t), r: *r}
srv.r.start(nil)
n.readyc <- raft.Ready{}
select {
@ -185,13 +185,13 @@ func TestConfigChangeBlocksApply(t *testing.T) {
n := newNopReadyNode()
r := newRaftNode(raftNodeConfig{
lg: zap.NewExample(),
lg: zaptest.NewLogger(t),
Node: n,
storage: mockstorage.NewStorageRecorder(""),
raftStorage: raft.NewMemoryStorage(),
transport: newNopTransporter(),
})
srv := &EtcdServer{lgMu: new(sync.RWMutex), lg: zap.NewExample(), r: *r}
srv := &EtcdServer{lgMu: new(sync.RWMutex), lg: zaptest.NewLogger(t), r: *r}
srv.r.start(&raftReadyHandler{
getLead: func() uint64 { return 0 },
@ -231,13 +231,13 @@ func TestConfigChangeBlocksApply(t *testing.T) {
func TestProcessDuplicatedAppRespMessage(t *testing.T) {
n := newNopReadyNode()
cl := membership.NewCluster(zap.NewExample())
cl := membership.NewCluster(zaptest.NewLogger(t))
rs := raft.NewMemoryStorage()
p := mockstorage.NewStorageRecorder("")
tr, sendc := newSendMsgAppRespTransporter()
r := newRaftNode(raftNodeConfig{
lg: zap.NewExample(),
lg: zaptest.NewLogger(t),
isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) },
Node: n,
transport: tr,
@ -247,7 +247,7 @@ func TestProcessDuplicatedAppRespMessage(t *testing.T) {
s := &EtcdServer{
lgMu: new(sync.RWMutex),
lg: zap.NewExample(),
lg: zaptest.NewLogger(t),
r: *r,
cluster: cl,
SyncTicker: &time.Ticker{},

View File

@ -101,7 +101,7 @@ func TestDoLocalAction(t *testing.T) {
st := mockstore.NewRecorder()
srv := &EtcdServer{
lgMu: new(sync.RWMutex),
lg: zap.NewExample(),
lg: zaptest.NewLogger(t),
v2store: st,
reqIDGen: idutil.NewGenerator(0, time.Time{}),
}
@ -156,7 +156,7 @@ func TestDoBadLocalAction(t *testing.T) {
st := mockstore.NewErrRecorder(storeErr)
srv := &EtcdServer{
lgMu: new(sync.RWMutex),
lg: zap.NewExample(),
lg: zaptest.NewLogger(t),
v2store: st,
reqIDGen: idutil.NewGenerator(0, time.Time{}),
}
@ -186,7 +186,7 @@ func TestApplyRepeat(t *testing.T) {
cl.SetStore(v2store.New())
cl.AddMember(&membership.Member{ID: 1234}, true)
r := newRaftNode(raftNodeConfig{
lg: zap.NewExample(),
lg: zaptest.NewLogger(t),
Node: n,
raftStorage: raft.NewMemoryStorage(),
storage: mockstorage.NewStorageRecorder(""),
@ -194,7 +194,7 @@ func TestApplyRepeat(t *testing.T) {
})
s := &EtcdServer{
lgMu: new(sync.RWMutex),
lg: zap.NewExample(),
lg: zaptest.NewLogger(t),
r: *r,
v2store: st,
cluster: cl,
@ -469,7 +469,7 @@ func TestApplyRequest(t *testing.T) {
st := mockstore.NewRecorder()
srv := &EtcdServer{
lgMu: new(sync.RWMutex),
lg: zap.NewExample(),
lg: zaptest.NewLogger(t),
v2store: st,
}
srv.applyV2 = &applierV2store{store: srv.v2store, cluster: srv.cluster}
@ -489,7 +489,7 @@ func TestApplyRequestOnAdminMemberAttributes(t *testing.T) {
cl := newTestCluster(t, []*membership.Member{{ID: 1}})
srv := &EtcdServer{
lgMu: new(sync.RWMutex),
lg: zap.NewExample(),
lg: zaptest.NewLogger(t),
v2store: mockstore.NewRecorder(),
cluster: cl,
}
@ -575,8 +575,8 @@ func TestApplyConfChangeError(t *testing.T) {
n := newNodeRecorder()
srv := &EtcdServer{
lgMu: new(sync.RWMutex),
lg: zap.NewExample(),
r: *newRaftNode(raftNodeConfig{lg: zap.NewExample(), Node: n}),
lg: zaptest.NewLogger(t),
r: *newRaftNode(raftNodeConfig{lg: zaptest.NewLogger(t), Node: n}),
cluster: cl,
}
_, err := srv.applyConfChange(tt.cc, nil, true)
@ -603,7 +603,7 @@ func TestApplyConfChangeShouldStop(t *testing.T) {
cl.AddMember(&membership.Member{ID: types.ID(i)}, true)
}
r := newRaftNode(raftNodeConfig{
lg: zap.NewExample(),
lg: zaptest.NewLogger(t),
Node: newNodeNop(),
transport: newNopTransporter(),
})
@ -778,7 +778,7 @@ func TestDoProposal(t *testing.T) {
for i, tt := range tests {
st := mockstore.NewRecorder()
r := newRaftNode(raftNodeConfig{
lg: zap.NewExample(),
lg: zaptest.NewLogger(t),
Node: newNodeCommitter(),
storage: mockstorage.NewStorageRecorder(""),
raftStorage: raft.NewMemoryStorage(),
@ -786,8 +786,8 @@ func TestDoProposal(t *testing.T) {
})
srv := &EtcdServer{
lgMu: new(sync.RWMutex),
lg: zap.NewExample(),
Cfg: config.ServerConfig{Logger: zap.NewExample(), TickMs: 1, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries},
lg: zaptest.NewLogger(t),
Cfg: config.ServerConfig{Logger: zaptest.NewLogger(t), TickMs: 1, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries},
r: *r,
v2store: st,
reqIDGen: idutil.NewGenerator(0, time.Time{}),
@ -818,8 +818,8 @@ func TestDoProposalCancelled(t *testing.T) {
wt := mockwait.NewRecorder()
srv := &EtcdServer{
lgMu: new(sync.RWMutex),
lg: zap.NewExample(),
Cfg: config.ServerConfig{Logger: zap.NewExample(), TickMs: 1, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries},
lg: zaptest.NewLogger(t),
Cfg: config.ServerConfig{Logger: zaptest.NewLogger(t), TickMs: 1, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries},
r: *newRaftNode(raftNodeConfig{Node: newNodeNop()}),
w: wt,
reqIDGen: idutil.NewGenerator(0, time.Time{}),
@ -842,8 +842,8 @@ func TestDoProposalCancelled(t *testing.T) {
func TestDoProposalTimeout(t *testing.T) {
srv := &EtcdServer{
lgMu: new(sync.RWMutex),
lg: zap.NewExample(),
Cfg: config.ServerConfig{Logger: zap.NewExample(), TickMs: 1, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries},
lg: zaptest.NewLogger(t),
Cfg: config.ServerConfig{Logger: zaptest.NewLogger(t), TickMs: 1, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries},
r: *newRaftNode(raftNodeConfig{Node: newNodeNop()}),
w: mockwait.NewNop(),
reqIDGen: idutil.NewGenerator(0, time.Time{}),
@ -861,9 +861,9 @@ func TestDoProposalTimeout(t *testing.T) {
func TestDoProposalStopped(t *testing.T) {
srv := &EtcdServer{
lgMu: new(sync.RWMutex),
lg: zap.NewExample(),
Cfg: config.ServerConfig{Logger: zap.NewExample(), TickMs: 1, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries},
r: *newRaftNode(raftNodeConfig{lg: zap.NewExample(), Node: newNodeNop()}),
lg: zaptest.NewLogger(t),
Cfg: config.ServerConfig{Logger: zaptest.NewLogger(t), TickMs: 1, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries},
r: *newRaftNode(raftNodeConfig{lg: zaptest.NewLogger(t), Node: newNodeNop()}),
w: mockwait.NewNop(),
reqIDGen: idutil.NewGenerator(0, time.Time{}),
}
@ -883,8 +883,8 @@ func TestSync(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
srv := &EtcdServer{
lgMu: new(sync.RWMutex),
lg: zap.NewExample(),
r: *newRaftNode(raftNodeConfig{lg: zap.NewExample(), Node: n}),
lg: zaptest.NewLogger(t),
r: *newRaftNode(raftNodeConfig{lg: zaptest.NewLogger(t), Node: n}),
reqIDGen: idutil.NewGenerator(0, time.Time{}),
ctx: ctx,
cancel: cancel,
@ -928,8 +928,8 @@ func TestSyncTimeout(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
srv := &EtcdServer{
lgMu: new(sync.RWMutex),
lg: zap.NewExample(),
r: *newRaftNode(raftNodeConfig{lg: zap.NewExample(), Node: n}),
lg: zaptest.NewLogger(t),
r: *newRaftNode(raftNodeConfig{lg: zaptest.NewLogger(t), Node: n}),
reqIDGen: idutil.NewGenerator(0, time.Time{}),
ctx: ctx,
cancel: cancel,
@ -963,7 +963,7 @@ func TestSyncTrigger(t *testing.T) {
st := make(chan time.Time, 1)
tk := &time.Ticker{C: st}
r := newRaftNode(raftNodeConfig{
lg: zap.NewExample(),
lg: zaptest.NewLogger(t),
Node: n,
raftStorage: raft.NewMemoryStorage(),
transport: newNopTransporter(),
@ -972,8 +972,8 @@ func TestSyncTrigger(t *testing.T) {
srv := &EtcdServer{
lgMu: new(sync.RWMutex),
lg: zap.NewExample(),
Cfg: config.ServerConfig{Logger: zap.NewExample(), TickMs: 1, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries},
lg: zaptest.NewLogger(t),
Cfg: config.ServerConfig{Logger: zaptest.NewLogger(t), TickMs: 1, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries},
r: *r,
v2store: mockstore.NewNop(),
SyncTicker: tk,
@ -1023,19 +1023,19 @@ func TestSnapshot(t *testing.T) {
st := mockstore.NewRecorderStream()
p := mockstorage.NewStorageRecorderStream("")
r := newRaftNode(raftNodeConfig{
lg: zap.NewExample(),
lg: zaptest.NewLogger(t),
Node: newNodeNop(),
raftStorage: s,
storage: p,
})
srv := &EtcdServer{
lgMu: new(sync.RWMutex),
lg: zap.NewExample(),
lg: zaptest.NewLogger(t),
r: *r,
v2store: st,
consistIndex: cindex.NewConsistentIndex(be),
}
srv.kv = mvcc.New(zap.NewExample(), be, &lease.FakeLessor{}, mvcc.StoreConfig{})
srv.kv = mvcc.New(zaptest.NewLogger(t), be, &lease.FakeLessor{}, mvcc.StoreConfig{})
srv.be = be
ch := make(chan struct{}, 2)
@ -1095,7 +1095,7 @@ func TestSnapshotOrdering(t *testing.T) {
rs := raft.NewMemoryStorage()
p := mockstorage.NewStorageRecorderStream(testdir)
tr, snapDoneC := newSnapTransporter(snapdir)
tr, snapDoneC := newSnapTransporter(lg, snapdir)
r := newRaftNode(raftNodeConfig{
lg: lg,
isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) },
@ -1176,7 +1176,7 @@ func TestTriggerSnap(t *testing.T) {
st := mockstore.NewRecorder()
p := mockstorage.NewStorageRecorderStream("")
r := newRaftNode(raftNodeConfig{
lg: zap.NewExample(),
lg: zaptest.NewLogger(t),
Node: newNodeCommitter(),
raftStorage: raft.NewMemoryStorage(),
storage: p,
@ -1184,8 +1184,8 @@ func TestTriggerSnap(t *testing.T) {
})
srv := &EtcdServer{
lgMu: new(sync.RWMutex),
lg: zap.NewExample(),
Cfg: config.ServerConfig{Logger: zap.NewExample(), TickMs: 1, SnapshotCount: uint64(snapc), SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries},
lg: zaptest.NewLogger(t),
Cfg: config.ServerConfig{Logger: zaptest.NewLogger(t), TickMs: 1, SnapshotCount: uint64(snapc), SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries},
r: *r,
v2store: st,
reqIDGen: idutil.NewGenerator(0, time.Time{}),
@ -1194,7 +1194,7 @@ func TestTriggerSnap(t *testing.T) {
}
srv.applyV2 = &applierV2store{store: srv.v2store, cluster: srv.cluster}
srv.kv = mvcc.New(zap.NewExample(), be, &lease.FakeLessor{}, mvcc.StoreConfig{})
srv.kv = mvcc.New(zaptest.NewLogger(t), be, &lease.FakeLessor{}, mvcc.StoreConfig{})
srv.be = be
srv.start()
@ -1244,7 +1244,7 @@ func TestConcurrentApplyAndSnapshotV3(t *testing.T) {
}
rs := raft.NewMemoryStorage()
tr, snapDoneC := newSnapTransporter(testdir)
tr, snapDoneC := newSnapTransporter(lg, testdir)
r := newRaftNode(raftNodeConfig{
lg: lg,
isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) },
@ -1390,7 +1390,7 @@ func TestRemoveMember(t *testing.T) {
})
s := &EtcdServer{
lgMu: new(sync.RWMutex),
lg: zap.NewExample(),
lg: zaptest.NewLogger(t),
r: *r,
v2store: st,
cluster: cl,
@ -1513,14 +1513,14 @@ func TestPublishV3(t *testing.T) {
func TestPublishV3Stopped(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
r := newRaftNode(raftNodeConfig{
lg: zap.NewExample(),
lg: zaptest.NewLogger(t),
Node: newNodeNop(),
transport: newNopTransporter(),
})
srv := &EtcdServer{
lgMu: new(sync.RWMutex),
lg: zap.NewExample(),
Cfg: config.ServerConfig{Logger: zap.NewExample(), TickMs: 1, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries},
lg: zaptest.NewLogger(t),
Cfg: config.ServerConfig{Logger: zaptest.NewLogger(t), TickMs: 1, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries},
r: *r,
cluster: &membership.RaftCluster{},
w: mockwait.NewNop(),
@ -1595,10 +1595,10 @@ func TestUpdateVersion(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
srv := &EtcdServer{
lgMu: new(sync.RWMutex),
lg: zap.NewExample(),
lg: zaptest.NewLogger(t),
id: 1,
Cfg: config.ServerConfig{Logger: zap.NewExample(), TickMs: 1, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries},
r: *newRaftNode(raftNodeConfig{lg: zap.NewExample(), Node: n}),
Cfg: config.ServerConfig{Logger: zaptest.NewLogger(t), TickMs: 1, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries},
r: *newRaftNode(raftNodeConfig{lg: zaptest.NewLogger(t), Node: n}),
attributes: membership.Attributes{Name: "node1", ClientURLs: []string{"http://node1.com"}},
cluster: &membership.RaftCluster{},
w: w,
@ -1636,7 +1636,7 @@ func TestUpdateVersion(t *testing.T) {
func TestStopNotify(t *testing.T) {
s := &EtcdServer{
lgMu: new(sync.RWMutex),
lg: zap.NewExample(),
lg: zaptest.NewLogger(t),
stop: make(chan struct{}),
done: make(chan struct{}),
}
@ -1873,16 +1873,17 @@ type snapTransporter struct {
nopTransporter
snapDoneC chan snap.Message
snapDir string
lg *zap.Logger
}
func newSnapTransporter(snapDir string) (rafthttp.Transporter, <-chan snap.Message) {
func newSnapTransporter(lg *zap.Logger, snapDir string) (rafthttp.Transporter, <-chan snap.Message) {
ch := make(chan snap.Message, 1)
tr := &snapTransporter{snapDoneC: ch, snapDir: snapDir}
tr := &snapTransporter{snapDoneC: ch, snapDir: snapDir, lg: lg}
return tr, ch
}
func (s *snapTransporter) SendSnapshot(m snap.Message) {
ss := snap.New(zap.NewExample(), s.snapDir)
ss := snap.New(s.lg, s.snapDir)
ss.SaveDBFrom(m.ReadCloser, m.Snapshot.Metadata.Index+1)
m.CloseWithError(nil)
s.snapDoneC <- m

View File

@ -16,9 +16,11 @@ package etcdserver
import (
"errors"
"go.etcd.io/etcd/raft/v3/raftpb"
"testing"
"time"
"go.etcd.io/etcd/raft/v3/raftpb"
"go.uber.org/zap/zaptest"
)
func BenchmarkWarnOfExpensiveRequestNoLog(b *testing.B) {
@ -45,6 +47,6 @@ func BenchmarkWarnOfExpensiveRequestNoLog(b *testing.B) {
}
err := errors.New("benchmarking warn of expensive request")
for n := 0; n < b.N; n++ {
warnOfExpensiveRequest(testLogger, time.Second, time.Now(), nil, m, err)
warnOfExpensiveRequest(zaptest.NewLogger(b), time.Second, time.Now(), nil, m, err)
}
}

View File

@ -19,7 +19,7 @@ import (
"testing"
"time"
"go.uber.org/zap"
"go.uber.org/zap/zaptest"
"go.etcd.io/etcd/client/pkg/v3/types"
"go.etcd.io/etcd/raft/v3/raftpb"
@ -33,7 +33,7 @@ func TestLongestConnected(t *testing.T) {
if err != nil {
t.Fatal(err)
}
clus, err := membership.NewClusterFromURLsMap(zap.NewExample(), "test", umap)
clus, err := membership.NewClusterFromURLsMap(zaptest.NewLogger(t), "test", umap)
if err != nil {
t.Fatal(err)
}

View File

@ -21,7 +21,7 @@ import (
"testing"
"time"
"go.uber.org/zap"
"go.uber.org/zap/zaptest"
)
func TestNewDirectorScheme(t *testing.T) {
@ -55,7 +55,7 @@ func TestNewDirectorScheme(t *testing.T) {
uf := func() []string {
return tt.urls
}
got := newDirector(zap.NewExample(), uf, time.Minute, time.Minute)
got := newDirector(zaptest.NewLogger(t), uf, time.Minute, time.Minute)
var gep []string
for _, ep := range got.ep {

View File

@ -22,7 +22,7 @@ import (
"testing"
"time"
"go.uber.org/zap"
"go.uber.org/zap/zaptest"
)
func TestReadonlyHandler(t *testing.T) {
@ -73,7 +73,7 @@ func TestConfigHandlerGET(t *testing.T) {
t.Fatal(err)
}
lg := zap.NewExample()
lg := zaptest.NewLogger(t)
rp := reverseProxy{
lg: lg,
director: &director{

View File

@ -24,7 +24,7 @@ import (
"reflect"
"testing"
"go.uber.org/zap"
"go.uber.org/zap/zaptest"
)
type staticRoundTripper struct {
@ -38,7 +38,7 @@ func (srt *staticRoundTripper) RoundTrip(*http.Request) (*http.Response, error)
func TestReverseProxyServe(t *testing.T) {
u := url.URL{Scheme: "http", Host: "192.0.2.3:4040"}
lg := zap.NewExample()
lg := zaptest.NewLogger(t)
tests := []struct {
eps []*endpoint

View File

@ -19,11 +19,11 @@ import (
"testing"
"github.com/google/btree"
"go.uber.org/zap"
"go.uber.org/zap/zaptest"
)
func TestIndexGet(t *testing.T) {
ti := newTreeIndex(zap.NewExample())
ti := newTreeIndex(zaptest.NewLogger(t))
ti.Put([]byte("foo"), revision{main: 2})
ti.Put([]byte("foo"), revision{main: 4})
ti.Tombstone([]byte("foo"), revision{main: 6})
@ -65,7 +65,7 @@ func TestIndexRange(t *testing.T) {
allKeys := [][]byte{[]byte("foo"), []byte("foo1"), []byte("foo2")}
allRevs := []revision{{main: 1}, {main: 2}, {main: 3}}
ti := newTreeIndex(zap.NewExample())
ti := newTreeIndex(zaptest.NewLogger(t))
for i := range allKeys {
ti.Put(allKeys[i], allRevs[i])
}
@ -121,7 +121,7 @@ func TestIndexRange(t *testing.T) {
}
func TestIndexTombstone(t *testing.T) {
ti := newTreeIndex(zap.NewExample())
ti := newTreeIndex(zaptest.NewLogger(t))
ti.Put([]byte("foo"), revision{main: 1})
err := ti.Tombstone([]byte("foo"), revision{main: 2})
@ -143,7 +143,7 @@ func TestIndexRangeSince(t *testing.T) {
allKeys := [][]byte{[]byte("foo"), []byte("foo1"), []byte("foo2"), []byte("foo2"), []byte("foo1"), []byte("foo")}
allRevs := []revision{{main: 1}, {main: 2}, {main: 3}, {main: 4}, {main: 5}, {main: 6}}
ti := newTreeIndex(zap.NewExample())
ti := newTreeIndex(zaptest.NewLogger(t))
for i := range allKeys {
ti.Put(allKeys[i], allRevs[i])
}
@ -217,7 +217,7 @@ func TestIndexCompactAndKeep(t *testing.T) {
}
// Continuous Compact and Keep
ti := newTreeIndex(zap.NewExample())
ti := newTreeIndex(zaptest.NewLogger(t))
for _, tt := range tests {
if tt.remove {
ti.Tombstone(tt.key, tt.rev)
@ -248,7 +248,7 @@ func TestIndexCompactAndKeep(t *testing.T) {
// Once Compact and Keep
for i := int64(1); i < maxRev; i++ {
ti := newTreeIndex(zap.NewExample())
ti := newTreeIndex(zaptest.NewLogger(t))
for _, tt := range tests {
if tt.remove {
ti.Tombstone(tt.key, tt.rev)

View File

@ -19,6 +19,7 @@ import (
"testing"
"go.uber.org/zap"
"go.uber.org/zap/zaptest"
)
func TestKeyIndexGet(t *testing.T) {
@ -29,8 +30,8 @@ func TestKeyIndexGet(t *testing.T) {
// {{14, 0}[1], {14, 1}[2], {16, 0}(t)[3]}
// {{8, 0}[1], {10, 0}[2], {12, 0}(t)[3]}
// {{2, 0}[1], {4, 0}[2], {6, 0}(t)[3]}
ki := newTestKeyIndex()
ki.compact(zap.NewExample(), 4, make(map[revision]struct{}))
ki := newTestKeyIndex(zaptest.NewLogger(t))
ki.compact(zaptest.NewLogger(t), 4, make(map[revision]struct{}))
tests := []struct {
rev int64
@ -70,7 +71,7 @@ func TestKeyIndexGet(t *testing.T) {
}
for i, tt := range tests {
mod, creat, ver, err := ki.get(zap.NewExample(), tt.rev)
mod, creat, ver, err := ki.get(zaptest.NewLogger(t), tt.rev)
if err != tt.werr {
t.Errorf("#%d: err = %v, want %v", i, err, tt.werr)
}
@ -87,8 +88,8 @@ func TestKeyIndexGet(t *testing.T) {
}
func TestKeyIndexSince(t *testing.T) {
ki := newTestKeyIndex()
ki.compact(zap.NewExample(), 4, make(map[revision]struct{}))
ki := newTestKeyIndex(zaptest.NewLogger(t))
ki.compact(zaptest.NewLogger(t), 4, make(map[revision]struct{}))
allRevs := []revision{{4, 0}, {6, 0}, {8, 0}, {10, 0}, {12, 0}, {14, 1}, {16, 0}}
tests := []struct {
@ -117,7 +118,7 @@ func TestKeyIndexSince(t *testing.T) {
}
for i, tt := range tests {
revs := ki.since(zap.NewExample(), tt.rev)
revs := ki.since(zaptest.NewLogger(t), tt.rev)
if !reflect.DeepEqual(revs, tt.wrevs) {
t.Errorf("#%d: revs = %+v, want %+v", i, revs, tt.wrevs)
}
@ -126,7 +127,7 @@ func TestKeyIndexSince(t *testing.T) {
func TestKeyIndexPut(t *testing.T) {
ki := &keyIndex{key: []byte("foo")}
ki.put(zap.NewExample(), 5, 0)
ki.put(zaptest.NewLogger(t), 5, 0)
wki := &keyIndex{
key: []byte("foo"),
@ -137,7 +138,7 @@ func TestKeyIndexPut(t *testing.T) {
t.Errorf("ki = %+v, want %+v", ki, wki)
}
ki.put(zap.NewExample(), 7, 0)
ki.put(zaptest.NewLogger(t), 7, 0)
wki = &keyIndex{
key: []byte("foo"),
@ -151,7 +152,7 @@ func TestKeyIndexPut(t *testing.T) {
func TestKeyIndexRestore(t *testing.T) {
ki := &keyIndex{key: []byte("foo")}
ki.restore(zap.NewExample(), revision{5, 0}, revision{7, 0}, 2)
ki.restore(zaptest.NewLogger(t), revision{5, 0}, revision{7, 0}, 2)
wki := &keyIndex{
key: []byte("foo"),
@ -165,9 +166,9 @@ func TestKeyIndexRestore(t *testing.T) {
func TestKeyIndexTombstone(t *testing.T) {
ki := &keyIndex{key: []byte("foo")}
ki.put(zap.NewExample(), 5, 0)
ki.put(zaptest.NewLogger(t), 5, 0)
err := ki.tombstone(zap.NewExample(), 7, 0)
err := ki.tombstone(zaptest.NewLogger(t), 7, 0)
if err != nil {
t.Errorf("unexpected tombstone error: %v", err)
}
@ -181,9 +182,9 @@ func TestKeyIndexTombstone(t *testing.T) {
t.Errorf("ki = %+v, want %+v", ki, wki)
}
ki.put(zap.NewExample(), 8, 0)
ki.put(zap.NewExample(), 9, 0)
err = ki.tombstone(zap.NewExample(), 15, 0)
ki.put(zaptest.NewLogger(t), 8, 0)
ki.put(zaptest.NewLogger(t), 9, 0)
err = ki.tombstone(zaptest.NewLogger(t), 15, 0)
if err != nil {
t.Errorf("unexpected tombstone error: %v", err)
}
@ -201,7 +202,7 @@ func TestKeyIndexTombstone(t *testing.T) {
t.Errorf("ki = %+v, want %+v", ki, wki)
}
err = ki.tombstone(zap.NewExample(), 16, 0)
err = ki.tombstone(zaptest.NewLogger(t), 16, 0)
if err != ErrRevisionNotFound {
t.Errorf("tombstone error = %v, want %v", err, ErrRevisionNotFound)
}
@ -444,7 +445,7 @@ func TestKeyIndexCompactAndKeep(t *testing.T) {
}
// Continuous Compaction and finding Keep
ki := newTestKeyIndex()
ki := newTestKeyIndex(zaptest.NewLogger(t))
for i, tt := range tests {
am := make(map[revision]struct{})
kiclone := cloneKeyIndex(ki)
@ -456,7 +457,7 @@ func TestKeyIndexCompactAndKeep(t *testing.T) {
t.Errorf("#%d: am = %+v, want %+v", i, am, tt.wam)
}
am = make(map[revision]struct{})
ki.compact(zap.NewExample(), tt.compact, am)
ki.compact(zaptest.NewLogger(t), tt.compact, am)
if !reflect.DeepEqual(ki, tt.wki) {
t.Errorf("#%d: ki = %+v, want %+v", i, ki, tt.wki)
}
@ -466,7 +467,7 @@ func TestKeyIndexCompactAndKeep(t *testing.T) {
}
// Jump Compaction and finding Keep
ki = newTestKeyIndex()
ki = newTestKeyIndex(zaptest.NewLogger(t))
for i, tt := range tests {
if (i%2 == 0 && i < 6) || (i%2 == 1 && i > 6) {
am := make(map[revision]struct{})
@ -479,7 +480,7 @@ func TestKeyIndexCompactAndKeep(t *testing.T) {
t.Errorf("#%d: am = %+v, want %+v", i, am, tt.wam)
}
am = make(map[revision]struct{})
ki.compact(zap.NewExample(), tt.compact, am)
ki.compact(zaptest.NewLogger(t), tt.compact, am)
if !reflect.DeepEqual(ki, tt.wki) {
t.Errorf("#%d: ki = %+v, want %+v", i, ki, tt.wki)
}
@ -489,10 +490,10 @@ func TestKeyIndexCompactAndKeep(t *testing.T) {
}
}
kiClone := newTestKeyIndex()
kiClone := newTestKeyIndex(zaptest.NewLogger(t))
// Once Compaction and finding Keep
for i, tt := range tests {
ki := newTestKeyIndex()
ki := newTestKeyIndex(zaptest.NewLogger(t))
am := make(map[revision]struct{})
ki.keep(tt.compact, am)
if !reflect.DeepEqual(ki, kiClone) {
@ -502,7 +503,7 @@ func TestKeyIndexCompactAndKeep(t *testing.T) {
t.Errorf("#%d: am = %+v, want %+v", i, am, tt.wam)
}
am = make(map[revision]struct{})
ki.compact(zap.NewExample(), tt.compact, am)
ki.compact(zaptest.NewLogger(t), tt.compact, am)
if !reflect.DeepEqual(ki, tt.wki) {
t.Errorf("#%d: ki = %+v, want %+v", i, ki, tt.wki)
}
@ -532,10 +533,10 @@ func cloneGeneration(g *generation) *generation {
// test that compact on version that higher than last modified version works well
func TestKeyIndexCompactOnFurtherRev(t *testing.T) {
ki := &keyIndex{key: []byte("foo")}
ki.put(zap.NewExample(), 1, 0)
ki.put(zap.NewExample(), 2, 0)
ki.put(zaptest.NewLogger(t), 1, 0)
ki.put(zaptest.NewLogger(t), 2, 0)
am := make(map[revision]struct{})
ki.compact(zap.NewExample(), 3, am)
ki.compact(zaptest.NewLogger(t), 3, am)
wki := &keyIndex{
key: []byte("foo"),
@ -587,7 +588,7 @@ func TestKeyIndexIsEmpty(t *testing.T) {
}
func TestKeyIndexFindGeneration(t *testing.T) {
ki := newTestKeyIndex()
ki := newTestKeyIndex(zaptest.NewLogger(t))
tests := []struct {
rev int64
@ -677,7 +678,7 @@ func TestGenerationWalk(t *testing.T) {
}
}
func newTestKeyIndex() *keyIndex {
func newTestKeyIndex(lg *zap.Logger) *keyIndex {
// key: "foo"
// rev: 16
// generations:
@ -687,14 +688,14 @@ func newTestKeyIndex() *keyIndex {
// {{2, 0}[1], {4, 0}[2], {6, 0}(t)[3]}
ki := &keyIndex{key: []byte("foo")}
ki.put(zap.NewExample(), 2, 0)
ki.put(zap.NewExample(), 4, 0)
ki.tombstone(zap.NewExample(), 6, 0)
ki.put(zap.NewExample(), 8, 0)
ki.put(zap.NewExample(), 10, 0)
ki.tombstone(zap.NewExample(), 12, 0)
ki.put(zap.NewExample(), 14, 0)
ki.put(zap.NewExample(), 14, 1)
ki.tombstone(zap.NewExample(), 16, 0)
ki.put(lg, 2, 0)
ki.put(lg, 4, 0)
ki.tombstone(lg, 6, 0)
ki.put(lg, 8, 0)
ki.put(lg, 10, 0)
ki.tombstone(lg, 12, 0)
ki.put(lg, 14, 0)
ki.put(lg, 14, 1)
ki.tombstone(lg, 16, 0)
return ki
}

View File

@ -28,10 +28,10 @@ import (
"go.etcd.io/etcd/server/v3/lease"
"go.etcd.io/etcd/server/v3/storage/backend"
betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
"go.uber.org/zap/zaptest"
"github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go"
"go.uber.org/zap"
)
// Functional tests for features implemented in v3 store. It treats v3 store
@ -79,7 +79,7 @@ func TestKVTxnRange(t *testing.T) { testKVRange(t, txnRangeFunc) }
func testKVRange(t *testing.T, f rangeFunc) {
b, tmpPath := betesting.NewDefaultTmpBackend(t)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
defer cleanup(s, b, tmpPath)
kvs := put3TestKVs(s)
@ -145,7 +145,7 @@ func TestKVTxnRangeRev(t *testing.T) { testKVRangeRev(t, txnRangeFunc) }
func testKVRangeRev(t *testing.T, f rangeFunc) {
b, tmpPath := betesting.NewDefaultTmpBackend(t)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
defer cleanup(s, b, tmpPath)
kvs := put3TestKVs(s)
@ -181,7 +181,7 @@ func TestKVTxnRangeBadRev(t *testing.T) { testKVRangeBadRev(t, txnRangeFunc) }
func testKVRangeBadRev(t *testing.T, f rangeFunc) {
b, tmpPath := betesting.NewDefaultTmpBackend(t)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
defer cleanup(s, b, tmpPath)
put3TestKVs(s)
@ -214,7 +214,7 @@ func TestKVTxnRangeLimit(t *testing.T) { testKVRangeLimit(t, txnRangeFunc) }
func testKVRangeLimit(t *testing.T, f rangeFunc) {
b, tmpPath := betesting.NewDefaultTmpBackend(t)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
defer cleanup(s, b, tmpPath)
kvs := put3TestKVs(s)
@ -260,7 +260,7 @@ func TestKVTxnPutMultipleTimes(t *testing.T) { testKVPutMultipleTimes(t, txnPutF
func testKVPutMultipleTimes(t *testing.T, f putFunc) {
b, tmpPath := betesting.NewDefaultTmpBackend(t)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
defer cleanup(s, b, tmpPath)
for i := 0; i < 10; i++ {
@ -322,7 +322,7 @@ func testKVDeleteRange(t *testing.T, f deleteRangeFunc) {
for i, tt := range tests {
b, tmpPath := betesting.NewDefaultTmpBackend(t)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
s.Put([]byte("foo"), []byte("bar"), lease.NoLease)
s.Put([]byte("foo1"), []byte("bar1"), lease.NoLease)
@ -342,7 +342,7 @@ func TestKVTxnDeleteMultipleTimes(t *testing.T) { testKVDeleteMultipleTimes(t, t
func testKVDeleteMultipleTimes(t *testing.T, f deleteRangeFunc) {
b, tmpPath := betesting.NewDefaultTmpBackend(t)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
defer cleanup(s, b, tmpPath)
s.Put([]byte("foo"), []byte("bar"), lease.NoLease)
@ -365,7 +365,7 @@ func TestKVTxnPutWithSameLease(t *testing.T) { testKVPutWithSameLease(t, txnPutF
func testKVPutWithSameLease(t *testing.T, f putFunc) {
b, tmpPath := betesting.NewDefaultTmpBackend(t)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
defer cleanup(s, b, tmpPath)
leaseID := int64(1)
@ -397,7 +397,7 @@ func testKVPutWithSameLease(t *testing.T, f putFunc) {
// test that range, put, delete on single key in sequence repeatedly works correctly.
func TestKVOperationInSequence(t *testing.T) {
b, tmpPath := betesting.NewDefaultTmpBackend(t)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
defer cleanup(s, b, tmpPath)
for i := 0; i < 10; i++ {
@ -444,7 +444,7 @@ func TestKVOperationInSequence(t *testing.T) {
func TestKVTxnBlockWriteOperations(t *testing.T) {
b, tmpPath := betesting.NewDefaultTmpBackend(t)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
tests := []func(){
func() { s.Put([]byte("foo"), nil, lease.NoLease) },
@ -478,7 +478,7 @@ func TestKVTxnBlockWriteOperations(t *testing.T) {
func TestKVTxnNonBlockRange(t *testing.T) {
b, tmpPath := betesting.NewDefaultTmpBackend(t)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
defer cleanup(s, b, tmpPath)
txn := s.Write(traceutil.TODO())
@ -499,7 +499,7 @@ func TestKVTxnNonBlockRange(t *testing.T) {
// test that txn range, put, delete on single key in sequence repeatedly works correctly.
func TestKVTxnOperationInSequence(t *testing.T) {
b, tmpPath := betesting.NewDefaultTmpBackend(t)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
defer cleanup(s, b, tmpPath)
for i := 0; i < 10; i++ {
@ -549,7 +549,7 @@ func TestKVTxnOperationInSequence(t *testing.T) {
func TestKVCompactReserveLastValue(t *testing.T) {
b, tmpPath := betesting.NewDefaultTmpBackend(t)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
defer cleanup(s, b, tmpPath)
s.Put([]byte("foo"), []byte("bar0"), 1)
@ -603,7 +603,7 @@ func TestKVCompactReserveLastValue(t *testing.T) {
func TestKVCompactBad(t *testing.T) {
b, tmpPath := betesting.NewDefaultTmpBackend(t)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
defer cleanup(s, b, tmpPath)
s.Put([]byte("foo"), []byte("bar0"), lease.NoLease)
@ -636,7 +636,7 @@ func TestKVHash(t *testing.T) {
for i := 0; i < len(hashes); i++ {
var err error
b, tmpPath := betesting.NewDefaultTmpBackend(t)
kv := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
kv := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
kv.Put([]byte("foo0"), []byte("bar0"), lease.NoLease)
kv.Put([]byte("foo1"), []byte("bar0"), lease.NoLease)
hashes[i], _, err = kv.Hash()
@ -674,7 +674,7 @@ func TestKVRestore(t *testing.T) {
}
for i, tt := range tests {
b, tmpPath := betesting.NewDefaultTmpBackend(t)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
tt(s)
var kvss [][]mvccpb.KeyValue
for k := int64(0); k < 10; k++ {
@ -686,7 +686,7 @@ func TestKVRestore(t *testing.T) {
s.Close()
// ns should recover the the previous state from backend.
ns := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
ns := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
if keysRestore := readGaugeInt(keysGauge); keysBefore != keysRestore {
t.Errorf("#%d: got %d key count, expected %d", i, keysRestore, keysBefore)
@ -718,7 +718,7 @@ func readGaugeInt(g prometheus.Gauge) int {
func TestKVSnapshot(t *testing.T) {
b, tmpPath := betesting.NewDefaultTmpBackend(t)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
defer cleanup(s, b, tmpPath)
wkvs := put3TestKVs(s)
@ -738,7 +738,7 @@ func TestKVSnapshot(t *testing.T) {
}
f.Close()
ns := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
ns := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
defer ns.Close()
r, err := ns.Range(context.TODO(), []byte("a"), []byte("z"), RangeOptions{})
if err != nil {
@ -754,7 +754,7 @@ func TestKVSnapshot(t *testing.T) {
func TestWatchableKVWatch(t *testing.T) {
b, tmpPath := betesting.NewDefaultTmpBackend(t)
s := WatchableKV(newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{}))
s := WatchableKV(newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}))
defer cleanup(s, b, tmpPath)
w := s.NewWatchStream()

View File

@ -24,13 +24,12 @@ import (
"go.etcd.io/etcd/server/v3/lease"
betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
"go.etcd.io/etcd/server/v3/storage/schema"
"go.uber.org/zap"
"go.uber.org/zap/zaptest"
)
func BenchmarkStorePut(b *testing.B) {
be, tmpPath := betesting.NewDefaultTmpBackend(b)
s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, StoreConfig{})
s := NewStore(zaptest.NewLogger(b), be, &lease.FakeLessor{}, StoreConfig{})
defer cleanup(s, be, tmpPath)
// arbitrary number of bytes
@ -49,7 +48,7 @@ func BenchmarkStoreRangeKey100(b *testing.B) { benchmarkStoreRange(b, 100) }
func benchmarkStoreRange(b *testing.B, n int) {
be, tmpPath := betesting.NewDefaultTmpBackend(b)
s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, StoreConfig{})
s := NewStore(zaptest.NewLogger(b), be, &lease.FakeLessor{}, StoreConfig{})
defer cleanup(s, be, tmpPath)
// 64 byte key/val
@ -98,7 +97,7 @@ func BenchmarkConsistentIndex(b *testing.B) {
// BenchmarkStoreTxnPutUpdate is same as above, but instead updates single key
func BenchmarkStorePutUpdate(b *testing.B) {
be, tmpPath := betesting.NewDefaultTmpBackend(b)
s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, StoreConfig{})
s := NewStore(zaptest.NewLogger(b), be, &lease.FakeLessor{}, StoreConfig{})
defer cleanup(s, be, tmpPath)
// arbitrary number of bytes
@ -116,7 +115,7 @@ func BenchmarkStorePutUpdate(b *testing.B) {
// some synchronization operations, such as mutex locking.
func BenchmarkStoreTxnPut(b *testing.B) {
be, tmpPath := betesting.NewDefaultTmpBackend(b)
s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, StoreConfig{})
s := NewStore(zaptest.NewLogger(b), be, &lease.FakeLessor{}, StoreConfig{})
defer cleanup(s, be, tmpPath)
// arbitrary number of bytes
@ -136,7 +135,7 @@ func BenchmarkStoreTxnPut(b *testing.B) {
// benchmarkStoreRestore benchmarks the restore operation
func benchmarkStoreRestore(revsPerKey int, b *testing.B) {
be, tmpPath := betesting.NewDefaultTmpBackend(b)
s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, StoreConfig{})
s := NewStore(zaptest.NewLogger(b), be, &lease.FakeLessor{}, StoreConfig{})
// use closure to capture 's' to pick up the reassignment
defer func() { cleanup(s, be, tmpPath) }()
@ -156,7 +155,7 @@ func benchmarkStoreRestore(revsPerKey int, b *testing.B) {
b.ReportAllocs()
b.ResetTimer()
s = NewStore(zap.NewExample(), be, &lease.FakeLessor{}, StoreConfig{})
s = NewStore(zaptest.NewLogger(b), be, &lease.FakeLessor{}, StoreConfig{})
}
func BenchmarkStoreRestoreRevs1(b *testing.B) {

View File

@ -25,7 +25,7 @@ import (
"go.etcd.io/etcd/server/v3/lease"
betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
"go.etcd.io/etcd/server/v3/storage/schema"
"go.uber.org/zap"
"go.uber.org/zap/zaptest"
)
func TestScheduleCompaction(t *testing.T) {
@ -68,7 +68,7 @@ func TestScheduleCompaction(t *testing.T) {
}
for i, tt := range tests {
b, tmpPath := betesting.NewDefaultTmpBackend(t)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
tx := s.b.BatchTx()
tx.Lock()
@ -101,7 +101,7 @@ func TestScheduleCompaction(t *testing.T) {
func TestCompactAllAndRestore(t *testing.T) {
b, tmpPath := betesting.NewDefaultTmpBackend(t)
s0 := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
s0 := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
defer os.Remove(tmpPath)
s0.Put([]byte("foo"), []byte("bar"), lease.NoLease)
@ -127,7 +127,7 @@ func TestCompactAllAndRestore(t *testing.T) {
t.Fatal(err)
}
s1 := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
s1 := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
if s1.Rev() != rev {
t.Errorf("rev = %v, want %v", s1.Rev(), rev)
}

View File

@ -38,13 +38,14 @@ import (
"go.etcd.io/etcd/server/v3/storage/backend"
betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
"go.etcd.io/etcd/server/v3/storage/schema"
"go.uber.org/zap/zaptest"
"go.uber.org/zap"
)
func TestStoreRev(t *testing.T) {
b, _ := betesting.NewDefaultTmpBackend(t)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
defer s.Close()
for i := 1; i <= 3; i++ {
@ -56,6 +57,7 @@ func TestStoreRev(t *testing.T) {
}
func TestStorePut(t *testing.T) {
lg := zaptest.NewLogger(t)
kv := mvccpb.KeyValue{
Key: []byte("foo"),
Value: []byte("bar"),
@ -84,7 +86,7 @@ func TestStorePut(t *testing.T) {
nil,
revision{2, 0},
newTestKeyBytes(revision{2, 0}, false),
newTestKeyBytes(lg, revision{2, 0}, false),
mvccpb.KeyValue{
Key: []byte("foo"),
Value: []byte("bar"),
@ -98,10 +100,10 @@ func TestStorePut(t *testing.T) {
{
revision{1, 1},
indexGetResp{revision{2, 0}, revision{2, 0}, 1, nil},
&rangeResp{[][]byte{newTestKeyBytes(revision{2, 1}, false)}, [][]byte{kvb}},
&rangeResp{[][]byte{newTestKeyBytes(lg, revision{2, 1}, false)}, [][]byte{kvb}},
revision{2, 0},
newTestKeyBytes(revision{2, 0}, false),
newTestKeyBytes(lg, revision{2, 0}, false),
mvccpb.KeyValue{
Key: []byte("foo"),
Value: []byte("bar"),
@ -115,10 +117,10 @@ func TestStorePut(t *testing.T) {
{
revision{2, 0},
indexGetResp{revision{2, 1}, revision{2, 0}, 2, nil},
&rangeResp{[][]byte{newTestKeyBytes(revision{2, 1}, false)}, [][]byte{kvb}},
&rangeResp{[][]byte{newTestKeyBytes(lg, revision{2, 1}, false)}, [][]byte{kvb}},
revision{3, 0},
newTestKeyBytes(revision{3, 0}, false),
newTestKeyBytes(lg, revision{3, 0}, false),
mvccpb.KeyValue{
Key: []byte("foo"),
Value: []byte("bar"),
@ -131,7 +133,7 @@ func TestStorePut(t *testing.T) {
},
}
for i, tt := range tests {
s := newFakeStore()
s := newFakeStore(lg)
b := s.b.(*fakeBackend)
fi := s.kvindex.(*fakeIndex)
@ -177,7 +179,8 @@ func TestStorePut(t *testing.T) {
}
func TestStoreRange(t *testing.T) {
key := newTestKeyBytes(revision{2, 0}, false)
lg := zaptest.NewLogger(t)
key := newTestKeyBytes(lg, revision{2, 0}, false)
kv := mvccpb.KeyValue{
Key: []byte("foo"),
Value: []byte("bar"),
@ -207,7 +210,7 @@ func TestStoreRange(t *testing.T) {
ro := RangeOptions{Limit: 1, Rev: 0, Count: false}
for i, tt := range tests {
s := newFakeStore()
s := newFakeStore(lg)
b := s.b.(*fakeBackend)
fi := s.kvindex.(*fakeIndex)
@ -249,7 +252,8 @@ func TestStoreRange(t *testing.T) {
}
func TestStoreDeleteRange(t *testing.T) {
key := newTestKeyBytes(revision{2, 0}, false)
lg := zaptest.NewLogger(t)
key := newTestKeyBytes(lg, revision{2, 0}, false)
kv := mvccpb.KeyValue{
Key: []byte("foo"),
Value: []byte("bar"),
@ -277,14 +281,14 @@ func TestStoreDeleteRange(t *testing.T) {
indexRangeResp{[][]byte{[]byte("foo")}, []revision{{2, 0}}},
rangeResp{[][]byte{key}, [][]byte{kvb}},
newTestKeyBytes(revision{3, 0}, true),
newTestKeyBytes(lg, revision{3, 0}, true),
revision{3, 0},
2,
revision{3, 0},
},
}
for i, tt := range tests {
s := newFakeStore()
s := newFakeStore(lg)
b := s.b.(*fakeBackend)
fi := s.kvindex.(*fakeIndex)
@ -323,15 +327,16 @@ func TestStoreDeleteRange(t *testing.T) {
}
func TestStoreCompact(t *testing.T) {
s := newFakeStore()
lg := zaptest.NewLogger(t)
s := newFakeStore(lg)
defer s.Close()
b := s.b.(*fakeBackend)
fi := s.kvindex.(*fakeIndex)
s.currentRev = 3
fi.indexCompactRespc <- map[revision]struct{}{{1, 0}: {}}
key1 := newTestKeyBytes(revision{1, 0}, false)
key2 := newTestKeyBytes(revision{2, 0}, false)
key1 := newTestKeyBytes(lg, revision{1, 0}, false)
key2 := newTestKeyBytes(lg, revision{2, 0}, false)
b.tx.rangeRespc <- rangeResp{[][]byte{key1, key2}, nil}
s.Compact(traceutil.TODO(), 3)
@ -360,11 +365,12 @@ func TestStoreCompact(t *testing.T) {
}
func TestStoreRestore(t *testing.T) {
s := newFakeStore()
lg := zaptest.NewLogger(t)
s := newFakeStore(lg)
b := s.b.(*fakeBackend)
fi := s.kvindex.(*fakeIndex)
putkey := newTestKeyBytes(revision{3, 0}, false)
putkey := newTestKeyBytes(lg, revision{3, 0}, false)
putkv := mvccpb.KeyValue{
Key: []byte("foo"),
Value: []byte("bar"),
@ -376,7 +382,7 @@ func TestStoreRestore(t *testing.T) {
if err != nil {
t.Fatal(err)
}
delkey := newTestKeyBytes(revision{5, 0}, true)
delkey := newTestKeyBytes(lg, revision{5, 0}, true)
delkv := mvccpb.KeyValue{
Key: []byte("foo"),
}
@ -427,7 +433,7 @@ func TestRestoreDelete(t *testing.T) {
defer func() { restoreChunkKeys = oldChunk }()
b, _ := betesting.NewDefaultTmpBackend(t)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
keys := make(map[string]struct{})
for i := 0; i < 20; i++ {
@ -452,7 +458,7 @@ func TestRestoreDelete(t *testing.T) {
}
s.Close()
s = NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
s = NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
defer s.Close()
for i := 0; i < 20; i++ {
ks := fmt.Sprintf("foo-%d", i)
@ -474,7 +480,7 @@ func TestRestoreContinueUnfinishedCompaction(t *testing.T) {
tests := []string{"recreate", "restore"}
for _, test := range tests {
b, _ := betesting.NewDefaultTmpBackend(t)
s0 := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
s0 := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
s0.Put([]byte("foo"), []byte("bar"), lease.NoLease)
s0.Put([]byte("foo"), []byte("bar1"), lease.NoLease)
@ -493,7 +499,7 @@ func TestRestoreContinueUnfinishedCompaction(t *testing.T) {
var s *store
switch test {
case "recreate":
s = NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
s = NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
case "restore":
s0.Restore(b)
s = s0
@ -535,7 +541,7 @@ type hashKVResult struct {
// TestHashKVWhenCompacting ensures that HashKV returns correct hash when compacting.
func TestHashKVWhenCompacting(t *testing.T) {
b, tmpPath := betesting.NewDefaultTmpBackend(t)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
defer s.Close()
defer b.Close()
defer os.Remove(tmpPath)
@ -605,7 +611,7 @@ func TestHashKVWhenCompacting(t *testing.T) {
// correct hash value with latest revision.
func TestHashKVZeroRevision(t *testing.T) {
b, tmpPath := betesting.NewDefaultTmpBackend(t)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
defer os.Remove(tmpPath)
rev := 10000
@ -638,7 +644,7 @@ func TestTxnPut(t *testing.T) {
vals := createBytesSlice(bytesN, sliceN)
b, tmpPath := betesting.NewDefaultTmpBackend(t)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
defer cleanup(s, b, tmpPath)
for i := 0; i < sliceN; i++ {
@ -654,7 +660,7 @@ func TestTxnPut(t *testing.T) {
// TestConcurrentReadNotBlockingWrite ensures Read does not blocking Write after its creation
func TestConcurrentReadNotBlockingWrite(t *testing.T) {
b, tmpPath := betesting.NewDefaultTmpBackend(t)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
defer os.Remove(tmpPath)
// write something to read later
@ -723,7 +729,7 @@ func TestConcurrentReadTxAndWrite(t *testing.T) {
mu sync.Mutex // mu protects committedKVs
)
b, tmpPath := betesting.NewDefaultTmpBackend(t)
s := NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
defer b.Close()
defer s.Close()
defer os.Remove(tmpPath)
@ -830,16 +836,16 @@ func newTestRevBytes(rev revision) []byte {
return bytes
}
func newTestKeyBytes(rev revision, tombstone bool) []byte {
func newTestKeyBytes(lg *zap.Logger, rev revision, tombstone bool) []byte {
bytes := newRevBytes()
revToBytes(rev, bytes)
if tombstone {
bytes = appendMarkTombstone(zap.NewExample(), bytes)
bytes = appendMarkTombstone(lg, bytes)
}
return bytes
}
func newFakeStore() *store {
func newFakeStore(lg *zap.Logger) *store {
b := &fakeBackend{&fakeBatchTx{
Recorder: &testutil.RecorderBuffered{},
rangeRespc: make(chan rangeResp, 5)}}
@ -859,7 +865,7 @@ func newFakeStore() *store {
compactMainRev: -1,
fifoSched: schedule.NewFIFOScheduler(),
stopc: make(chan struct{}),
lg: zap.NewExample(),
lg: lg,
}
s.ReadView, s.WriteView = &readView{s}, &writeView{s}
return s

View File

@ -22,13 +22,12 @@ import (
"go.etcd.io/etcd/pkg/v3/traceutil"
"go.etcd.io/etcd/server/v3/lease"
betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
"go.uber.org/zap"
"go.uber.org/zap/zaptest"
)
func BenchmarkWatchableStorePut(b *testing.B) {
be, tmpPath := betesting.NewDefaultTmpBackend(b)
s := New(zap.NewExample(), be, &lease.FakeLessor{}, StoreConfig{})
s := New(zaptest.NewLogger(b), be, &lease.FakeLessor{}, StoreConfig{})
defer cleanup(s, be, tmpPath)
// arbitrary number of bytes
@ -48,7 +47,7 @@ func BenchmarkWatchableStorePut(b *testing.B) {
// some synchronization operations, such as mutex locking.
func BenchmarkWatchableStoreTxnPut(b *testing.B) {
be, tmpPath := betesting.NewDefaultTmpBackend(b)
s := New(zap.NewExample(), be, &lease.FakeLessor{}, StoreConfig{})
s := New(zaptest.NewLogger(b), be, &lease.FakeLessor{}, StoreConfig{})
defer cleanup(s, be, tmpPath)
// arbitrary number of bytes
@ -79,7 +78,7 @@ func BenchmarkWatchableStoreWatchPutUnsync(b *testing.B) {
func benchmarkWatchableStoreWatchPut(b *testing.B, synced bool) {
be, tmpPath := betesting.NewDefaultTmpBackend(b)
s := newWatchableStore(zap.NewExample(), be, &lease.FakeLessor{}, StoreConfig{})
s := newWatchableStore(zaptest.NewLogger(b), be, &lease.FakeLessor{}, StoreConfig{})
defer cleanup(s, be, tmpPath)
k := []byte("testkey")
@ -122,7 +121,7 @@ func benchmarkWatchableStoreWatchPut(b *testing.B, synced bool) {
// we should put to simulate the real-world use cases.
func BenchmarkWatchableStoreUnsyncedCancel(b *testing.B) {
be, tmpPath := betesting.NewDefaultTmpBackend(b)
s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, StoreConfig{})
s := NewStore(zaptest.NewLogger(b), be, &lease.FakeLessor{}, StoreConfig{})
// manually create watchableStore instead of newWatchableStore
// because newWatchableStore periodically calls syncWatchersLoop
@ -179,7 +178,7 @@ func BenchmarkWatchableStoreUnsyncedCancel(b *testing.B) {
func BenchmarkWatchableStoreSyncedCancel(b *testing.B) {
be, tmpPath := betesting.NewDefaultTmpBackend(b)
s := newWatchableStore(zap.NewExample(), be, &lease.FakeLessor{}, StoreConfig{})
s := newWatchableStore(zaptest.NewLogger(b), be, &lease.FakeLessor{}, StoreConfig{})
defer func() {
s.store.Close()

View File

@ -27,12 +27,12 @@ import (
"go.etcd.io/etcd/pkg/v3/traceutil"
"go.etcd.io/etcd/server/v3/lease"
betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
"go.uber.org/zap"
"go.uber.org/zap/zaptest"
)
func TestWatch(t *testing.T) {
b, tmpPath := betesting.NewDefaultTmpBackend(t)
s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
s := newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
defer func() {
b.Close()
@ -55,7 +55,7 @@ func TestWatch(t *testing.T) {
func TestNewWatcherCancel(t *testing.T) {
b, tmpPath := betesting.NewDefaultTmpBackend(t)
s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
s := newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
defer func() {
s.store.Close()
@ -87,7 +87,7 @@ func TestCancelUnsynced(t *testing.T) {
// method to sync watchers in unsynced map. We want to keep watchers
// in unsynced to test if syncWatchers works as expected.
s := &watchableStore{
store: NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{}),
store: NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}),
unsynced: newWatcherGroup(),
// to make the test not crash from assigning to nil map.
@ -142,7 +142,7 @@ func TestSyncWatchers(t *testing.T) {
b, tmpPath := betesting.NewDefaultTmpBackend(t)
s := &watchableStore{
store: NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{}),
store: NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}),
unsynced: newWatcherGroup(),
synced: newWatcherGroup(),
}
@ -225,7 +225,7 @@ func TestSyncWatchers(t *testing.T) {
// TestWatchCompacted tests a watcher that watches on a compacted revision.
func TestWatchCompacted(t *testing.T) {
b, tmpPath := betesting.NewDefaultTmpBackend(t)
s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
s := newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
defer func() {
s.store.Close()
@ -262,7 +262,7 @@ func TestWatchCompacted(t *testing.T) {
func TestWatchFutureRev(t *testing.T) {
b, tmpPath := betesting.NewDefaultTmpBackend(t)
s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
s := newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
defer func() {
b.Close()
@ -304,7 +304,7 @@ func TestWatchRestore(t *testing.T) {
test := func(delay time.Duration) func(t *testing.T) {
return func(t *testing.T) {
b, tmpPath := betesting.NewDefaultTmpBackend(t)
s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
s := newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
defer cleanup(s, b, tmpPath)
testKey := []byte("foo")
@ -312,7 +312,7 @@ func TestWatchRestore(t *testing.T) {
rev := s.Put(testKey, testValue, lease.NoLease)
newBackend, newPath := betesting.NewDefaultTmpBackend(t)
newStore := newWatchableStore(zap.NewExample(), newBackend, &lease.FakeLessor{}, StoreConfig{})
newStore := newWatchableStore(zaptest.NewLogger(t), newBackend, &lease.FakeLessor{}, StoreConfig{})
defer cleanup(newStore, newBackend, newPath)
w := newStore.NewWatchStream()
@ -350,11 +350,11 @@ func TestWatchRestore(t *testing.T) {
// 5. choose the watcher from step 1, without panic
func TestWatchRestoreSyncedWatcher(t *testing.T) {
b1, b1Path := betesting.NewDefaultTmpBackend(t)
s1 := newWatchableStore(zap.NewExample(), b1, &lease.FakeLessor{}, StoreConfig{})
s1 := newWatchableStore(zaptest.NewLogger(t), b1, &lease.FakeLessor{}, StoreConfig{})
defer cleanup(s1, b1, b1Path)
b2, b2Path := betesting.NewDefaultTmpBackend(t)
s2 := newWatchableStore(zap.NewExample(), b2, &lease.FakeLessor{}, StoreConfig{})
s2 := newWatchableStore(zaptest.NewLogger(t), b2, &lease.FakeLessor{}, StoreConfig{})
defer cleanup(s2, b2, b2Path)
testKey, testValue := []byte("foo"), []byte("bar")
@ -401,7 +401,7 @@ func TestWatchRestoreSyncedWatcher(t *testing.T) {
// TestWatchBatchUnsynced tests batching on unsynced watchers
func TestWatchBatchUnsynced(t *testing.T) {
b, tmpPath := betesting.NewDefaultTmpBackend(t)
s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
s := newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
oldMaxRevs := watchBatchMaxRevs
defer func() {
@ -535,7 +535,7 @@ func TestWatchVictims(t *testing.T) {
oldChanBufLen, oldMaxWatchersPerSync := chanBufLen, maxWatchersPerSync
b, tmpPath := betesting.NewDefaultTmpBackend(t)
s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
s := newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
defer func() {
b.Close()
@ -614,7 +614,7 @@ func TestWatchVictims(t *testing.T) {
// canceling its watches.
func TestStressWatchCancelClose(t *testing.T) {
b, tmpPath := betesting.NewDefaultTmpBackend(t)
s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
s := newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
defer func() {
b.Close()

View File

@ -20,13 +20,12 @@ import (
"go.etcd.io/etcd/server/v3/lease"
betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
"go.uber.org/zap"
"go.uber.org/zap/zaptest"
)
func BenchmarkKVWatcherMemoryUsage(b *testing.B) {
be, tmpPath := betesting.NewDefaultTmpBackend(b)
watchable := newWatchableStore(zap.NewExample(), be, &lease.FakeLessor{}, StoreConfig{})
watchable := newWatchableStore(zaptest.NewLogger(b), be, &lease.FakeLessor{}, StoreConfig{})
defer cleanup(watchable, be, tmpPath)

View File

@ -25,14 +25,14 @@ import (
"go.etcd.io/etcd/api/v3/mvccpb"
"go.etcd.io/etcd/server/v3/lease"
betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
"go.uber.org/zap"
"go.uber.org/zap/zaptest"
)
// TestWatcherWatchID tests that each watcher provides unique watchID,
// and the watched event attaches the correct watchID.
func TestWatcherWatchID(t *testing.T) {
b, tmpPath := betesting.NewDefaultTmpBackend(t)
s := WatchableKV(newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{}))
s := WatchableKV(newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}))
defer cleanup(s, b, tmpPath)
w := s.NewWatchStream()
@ -82,7 +82,7 @@ func TestWatcherWatchID(t *testing.T) {
func TestWatcherRequestsCustomID(t *testing.T) {
b, tmpPath := betesting.NewDefaultTmpBackend(t)
s := WatchableKV(newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{}))
s := WatchableKV(newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}))
defer cleanup(s, b, tmpPath)
w := s.NewWatchStream()
@ -119,7 +119,7 @@ func TestWatcherRequestsCustomID(t *testing.T) {
// and returns events with matching prefixes.
func TestWatcherWatchPrefix(t *testing.T) {
b, tmpPath := betesting.NewDefaultTmpBackend(t)
s := WatchableKV(newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{}))
s := WatchableKV(newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}))
defer cleanup(s, b, tmpPath)
w := s.NewWatchStream()
@ -193,7 +193,7 @@ func TestWatcherWatchPrefix(t *testing.T) {
// does not create watcher, which panics when canceling in range tree.
func TestWatcherWatchWrongRange(t *testing.T) {
b, tmpPath := betesting.NewDefaultTmpBackend(t)
s := WatchableKV(newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{}))
s := WatchableKV(newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}))
defer cleanup(s, b, tmpPath)
w := s.NewWatchStream()
@ -213,7 +213,7 @@ func TestWatcherWatchWrongRange(t *testing.T) {
func TestWatchDeleteRange(t *testing.T) {
b, tmpPath := betesting.NewDefaultTmpBackend(t)
s := newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{})
s := newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})
defer func() {
b.Close()
@ -253,7 +253,7 @@ func TestWatchDeleteRange(t *testing.T) {
// with given id inside watchStream.
func TestWatchStreamCancelWatcherByID(t *testing.T) {
b, tmpPath := betesting.NewDefaultTmpBackend(t)
s := WatchableKV(newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{}))
s := WatchableKV(newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}))
defer cleanup(s, b, tmpPath)
w := s.NewWatchStream()
@ -296,7 +296,7 @@ func TestWatcherRequestProgress(t *testing.T) {
// method to sync watchers in unsynced map. We want to keep watchers
// in unsynced to test if syncWatchers works as expected.
s := &watchableStore{
store: NewStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{}),
store: NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}),
unsynced: newWatcherGroup(),
synced: newWatcherGroup(),
}
@ -345,7 +345,7 @@ func TestWatcherRequestProgress(t *testing.T) {
func TestWatcherWatchWithFilter(t *testing.T) {
b, tmpPath := betesting.NewDefaultTmpBackend(t)
s := WatchableKV(newWatchableStore(zap.NewExample(), b, &lease.FakeLessor{}, StoreConfig{}))
s := WatchableKV(newWatchableStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{}))
defer cleanup(s, b, tmpPath)
w := s.NewWatchStream()

View File

@ -18,13 +18,13 @@ import (
"math"
"testing"
"go.uber.org/zap"
"go.uber.org/zap/zaptest"
)
func TestFilePipeline(t *testing.T) {
tdir := t.TempDir()
fp := newFilePipeline(zap.NewExample(), tdir, SegmentSizeBytes)
fp := newFilePipeline(zaptest.NewLogger(t), tdir, SegmentSizeBytes)
defer fp.Close()
f, ferr := fp.Open()
@ -37,7 +37,7 @@ func TestFilePipeline(t *testing.T) {
func TestFilePipelineFailPreallocate(t *testing.T) {
tdir := t.TempDir()
fp := newFilePipeline(zap.NewExample(), tdir, math.MaxInt64)
fp := newFilePipeline(zaptest.NewLogger(t), tdir, math.MaxInt64)
defer fp.Close()
f, ferr := fp.Open()
@ -49,7 +49,7 @@ func TestFilePipelineFailPreallocate(t *testing.T) {
func TestFilePipelineFailLockFile(t *testing.T) {
tdir := t.TempDir()
fp := newFilePipeline(zap.NewExample(), tdir, math.MaxInt64)
fp := newFilePipeline(zaptest.NewLogger(t), tdir, math.MaxInt64)
defer fp.Close()
f, ferr := fp.Open()

View File

@ -22,8 +22,7 @@ import (
"go.etcd.io/etcd/raft/v3/raftpb"
"go.etcd.io/etcd/server/v3/storage/wal/walpb"
"go.uber.org/zap"
"go.uber.org/zap/zaptest"
)
type corruptFunc func(string, int64) error
@ -31,7 +30,7 @@ type corruptFunc func(string, int64) error
// TestRepairTruncate ensures a truncated file can be repaired
func TestRepairTruncate(t *testing.T) {
corruptf := func(p string, offset int64) error {
f, err := openLast(zap.NewExample(), p)
f, err := openLast(zaptest.NewLogger(t), p)
if err != nil {
return err
}
@ -46,7 +45,7 @@ func testRepair(t *testing.T, ents [][]raftpb.Entry, corrupt corruptFunc, expect
p := t.TempDir()
// create WAL
w, err := Create(zap.NewExample(), p, nil)
w, err := Create(zaptest.NewLogger(t), p, nil)
defer func() {
if err = w.Close(); err != nil {
t.Fatal(err)
@ -74,7 +73,7 @@ func testRepair(t *testing.T, ents [][]raftpb.Entry, corrupt corruptFunc, expect
}
// verify we broke the wal
w, err = Open(zap.NewExample(), p, walpb.Snapshot{})
w, err = Open(zaptest.NewLogger(t), p, walpb.Snapshot{})
if err != nil {
t.Fatal(err)
}
@ -85,12 +84,12 @@ func testRepair(t *testing.T, ents [][]raftpb.Entry, corrupt corruptFunc, expect
w.Close()
// repair the wal
if ok := Repair(zap.NewExample(), p); !ok {
if ok := Repair(zaptest.NewLogger(t), p); !ok {
t.Fatalf("'Repair' returned '%v', want 'true'", ok)
}
// read it back
w, err = Open(zap.NewExample(), p, walpb.Snapshot{})
w, err = Open(zaptest.NewLogger(t), p, walpb.Snapshot{})
if err != nil {
t.Fatal(err)
}
@ -112,7 +111,7 @@ func testRepair(t *testing.T, ents [][]raftpb.Entry, corrupt corruptFunc, expect
w.Close()
// read back entries following repair, ensure it's all there
w, err = Open(zap.NewExample(), p, walpb.Snapshot{})
w, err = Open(zaptest.NewLogger(t), p, walpb.Snapshot{})
if err != nil {
t.Fatal(err)
}
@ -136,7 +135,7 @@ func makeEnts(ents int) (ret [][]raftpb.Entry) {
// that straddled two sectors.
func TestRepairWriteTearLast(t *testing.T) {
corruptf := func(p string, offset int64) error {
f, err := openLast(zap.NewExample(), p)
f, err := openLast(zaptest.NewLogger(t), p)
if err != nil {
return err
}
@ -157,7 +156,7 @@ func TestRepairWriteTearLast(t *testing.T) {
// in the middle of a record.
func TestRepairWriteTearMiddle(t *testing.T) {
corruptf := func(p string, offset int64) error {
f, err := openLast(zap.NewExample(), p)
f, err := openLast(zaptest.NewLogger(t), p)
if err != nil {
return err
}
@ -181,7 +180,7 @@ func TestRepairWriteTearMiddle(t *testing.T) {
func TestRepairFailDeleteDir(t *testing.T) {
p := t.TempDir()
w, err := Create(zap.NewExample(), p, nil)
w, err := Create(zaptest.NewLogger(t), p, nil)
if err != nil {
t.Fatal(err)
}
@ -203,7 +202,7 @@ func TestRepairFailDeleteDir(t *testing.T) {
}
w.Close()
f, err := openLast(zap.NewExample(), p)
f, err := openLast(zaptest.NewLogger(t), p)
if err != nil {
t.Fatal(err)
}
@ -212,7 +211,7 @@ func TestRepairFailDeleteDir(t *testing.T) {
}
f.Close()
w, err = Open(zap.NewExample(), p, walpb.Snapshot{})
w, err = Open(zaptest.NewLogger(t), p, walpb.Snapshot{})
if err != nil {
t.Fatal(err)
}
@ -223,7 +222,7 @@ func TestRepairFailDeleteDir(t *testing.T) {
w.Close()
os.RemoveAll(p)
if Repair(zap.NewExample(), p) {
if Repair(zaptest.NewLogger(t), p) {
t.Fatal("expect 'Repair' fail on unexpected directory deletion")
}
}

View File

@ -17,9 +17,8 @@ package wal
import (
"testing"
"go.uber.org/zap"
"go.etcd.io/etcd/raft/v3/raftpb"
"go.uber.org/zap/zaptest"
)
func BenchmarkWrite100EntryWithoutBatch(b *testing.B) { benchmarkWriteEntry(b, 100, 0) }
@ -37,7 +36,7 @@ func BenchmarkWrite1000EntryBatch1000(b *testing.B) { benchmarkWriteEntry(b,
func benchmarkWriteEntry(b *testing.B, size int, batch int) {
p := b.TempDir()
w, err := Create(zap.NewExample(), p, []byte("somedata"))
w, err := Create(zaptest.NewLogger(b), p, []byte("somedata"))
if err != nil {
b.Fatalf("err = %v, want nil", err)
}

View File

@ -32,8 +32,6 @@ import (
"go.etcd.io/etcd/raft/v3/raftpb"
"go.etcd.io/etcd/server/v3/storage/wal/walpb"
"go.uber.org/zap/zaptest"
"go.uber.org/zap"
)
var (
@ -46,7 +44,7 @@ var (
func TestNew(t *testing.T) {
p := t.TempDir()
w, err := Create(zap.NewExample(), p, []byte("somedata"))
w, err := Create(zaptest.NewLogger(t), p, []byte("somedata"))
if err != nil {
t.Fatalf("err = %v, want nil", err)
}
@ -97,7 +95,7 @@ func TestCreateFailFromPollutedDir(t *testing.T) {
p := t.TempDir()
os.WriteFile(filepath.Join(p, "test.wal"), []byte("data"), os.ModeTemporary)
_, err := Create(zap.NewExample(), p, []byte("data"))
_, err := Create(zaptest.NewLogger(t), p, []byte("data"))
if err != os.ErrExist {
t.Fatalf("expected %v, got %v", os.ErrExist, err)
}
@ -110,7 +108,7 @@ func TestWalCleanup(t *testing.T) {
t.Fatal(err)
}
logger := zap.NewExample()
logger := zaptest.NewLogger(t)
w, err := Create(logger, p, []byte(""))
if err != nil {
t.Fatalf("err = %v, want nil", err)
@ -139,7 +137,7 @@ func TestCreateFailFromNoSpaceLeft(t *testing.T) {
}()
SegmentSizeBytes = math.MaxInt64
_, err := Create(zap.NewExample(), p, []byte("data"))
_, err := Create(zaptest.NewLogger(t), p, []byte("data"))
if err == nil { // no space left on device
t.Fatalf("expected error 'no space left on device', got nil")
}
@ -149,7 +147,7 @@ func TestNewForInitedDir(t *testing.T) {
p := t.TempDir()
os.Create(filepath.Join(p, walName(0, 0)))
if _, err := Create(zap.NewExample(), p, nil); err == nil || err != os.ErrExist {
if _, err := Create(zaptest.NewLogger(t), p, nil); err == nil || err != os.ErrExist {
t.Errorf("err = %v, want %v", err, os.ErrExist)
}
}
@ -163,7 +161,7 @@ func TestOpenAtIndex(t *testing.T) {
}
f.Close()
w, err := Open(zap.NewExample(), dir, walpb.Snapshot{})
w, err := Open(zaptest.NewLogger(t), dir, walpb.Snapshot{})
if err != nil {
t.Fatalf("err = %v, want nil", err)
}
@ -182,7 +180,7 @@ func TestOpenAtIndex(t *testing.T) {
}
f.Close()
w, err = Open(zap.NewExample(), dir, walpb.Snapshot{Index: 5})
w, err = Open(zaptest.NewLogger(t), dir, walpb.Snapshot{Index: 5})
if err != nil {
t.Fatalf("err = %v, want nil", err)
}
@ -195,7 +193,7 @@ func TestOpenAtIndex(t *testing.T) {
w.Close()
emptydir := t.TempDir()
if _, err = Open(zap.NewExample(), emptydir, walpb.Snapshot{}); err != ErrFileNotFound {
if _, err = Open(zaptest.NewLogger(t), emptydir, walpb.Snapshot{}); err != ErrFileNotFound {
t.Errorf("err = %v, want %v", err, ErrFileNotFound)
}
}
@ -256,7 +254,7 @@ func TestVerify(t *testing.T) {
func TestCut(t *testing.T) {
p := t.TempDir()
w, err := Create(zap.NewExample(), p, nil)
w, err := Create(zaptest.NewLogger(t), p, nil)
if err != nil {
t.Fatal(err)
}
@ -314,7 +312,7 @@ func TestCut(t *testing.T) {
func TestSaveWithCut(t *testing.T) {
p := t.TempDir()
w, err := Create(zap.NewExample(), p, []byte("metadata"))
w, err := Create(zaptest.NewLogger(t), p, []byte("metadata"))
if err != nil {
t.Fatal(err)
}
@ -342,7 +340,7 @@ func TestSaveWithCut(t *testing.T) {
w.Close()
neww, err := Open(zap.NewExample(), p, walpb.Snapshot{})
neww, err := Open(zaptest.NewLogger(t), p, walpb.Snapshot{})
if err != nil {
t.Fatalf("err = %v, want nil", err)
}
@ -373,7 +371,7 @@ func TestSaveWithCut(t *testing.T) {
func TestRecover(t *testing.T) {
p := t.TempDir()
w, err := Create(zap.NewExample(), p, []byte("metadata"))
w, err := Create(zaptest.NewLogger(t), p, []byte("metadata"))
if err != nil {
t.Fatal(err)
}
@ -392,7 +390,7 @@ func TestRecover(t *testing.T) {
}
w.Close()
if w, err = Open(zap.NewExample(), p, walpb.Snapshot{}); err != nil {
if w, err = Open(zaptest.NewLogger(t), p, walpb.Snapshot{}); err != nil {
t.Fatal(err)
}
metadata, state, entries, err := w.ReadAll()
@ -447,7 +445,7 @@ func TestSearchIndex(t *testing.T) {
},
}
for i, tt := range tests {
idx, ok := searchIndex(zap.NewExample(), tt.names, tt.index)
idx, ok := searchIndex(zaptest.NewLogger(t), tt.names, tt.index)
if idx != tt.widx {
t.Errorf("#%d: idx = %d, want %d", i, idx, tt.widx)
}
@ -484,7 +482,7 @@ func TestScanWalName(t *testing.T) {
func TestRecoverAfterCut(t *testing.T) {
p := t.TempDir()
md, err := Create(zap.NewExample(), p, []byte("metadata"))
md, err := Create(zaptest.NewLogger(t), p, []byte("metadata"))
if err != nil {
t.Fatal(err)
}
@ -507,7 +505,7 @@ func TestRecoverAfterCut(t *testing.T) {
}
for i := 0; i < 10; i++ {
w, err := Open(zap.NewExample(), p, walpb.Snapshot{Index: uint64(i), Term: 1})
w, err := Open(zaptest.NewLogger(t), p, walpb.Snapshot{Index: uint64(i), Term: 1})
if err != nil {
if i <= 4 {
if err != ErrFileNotFound {
@ -538,7 +536,7 @@ func TestRecoverAfterCut(t *testing.T) {
func TestOpenAtUncommittedIndex(t *testing.T) {
p := t.TempDir()
w, err := Create(zap.NewExample(), p, nil)
w, err := Create(zaptest.NewLogger(t), p, nil)
if err != nil {
t.Fatal(err)
}
@ -550,7 +548,7 @@ func TestOpenAtUncommittedIndex(t *testing.T) {
}
w.Close()
w, err = Open(zap.NewExample(), p, walpb.Snapshot{})
w, err = Open(zaptest.NewLogger(t), p, walpb.Snapshot{})
if err != nil {
t.Fatal(err)
}
@ -568,7 +566,7 @@ func TestOpenAtUncommittedIndex(t *testing.T) {
func TestOpenForRead(t *testing.T) {
p := t.TempDir()
// create WAL
w, err := Create(zap.NewExample(), p, nil)
w, err := Create(zaptest.NewLogger(t), p, nil)
if err != nil {
t.Fatal(err)
}
@ -588,7 +586,7 @@ func TestOpenForRead(t *testing.T) {
w.ReleaseLockTo(unlockIndex)
// All are available for read
w2, err := OpenForRead(zap.NewExample(), p, walpb.Snapshot{})
w2, err := OpenForRead(zaptest.NewLogger(t), p, walpb.Snapshot{})
if err != nil {
t.Fatal(err)
}
@ -605,7 +603,7 @@ func TestOpenForRead(t *testing.T) {
func TestOpenWithMaxIndex(t *testing.T) {
p := t.TempDir()
// create WAL
w, err := Create(zap.NewExample(), p, nil)
w, err := Create(zaptest.NewLogger(t), p, nil)
if err != nil {
t.Fatal(err)
}
@ -617,7 +615,7 @@ func TestOpenWithMaxIndex(t *testing.T) {
}
w.Close()
w, err = Open(zap.NewExample(), p, walpb.Snapshot{})
w, err = Open(zaptest.NewLogger(t), p, walpb.Snapshot{})
if err != nil {
t.Fatal(err)
}
@ -644,7 +642,7 @@ func TestSaveEmpty(t *testing.T) {
func TestReleaseLockTo(t *testing.T) {
p := t.TempDir()
// create WAL
w, err := Create(zap.NewExample(), p, nil)
w, err := Create(zaptest.NewLogger(t), p, nil)
defer func() {
if err = w.Close(); err != nil {
t.Fatal(err)
@ -713,7 +711,7 @@ func TestTailWriteNoSlackSpace(t *testing.T) {
p := t.TempDir()
// create initial WAL
w, err := Create(zap.NewExample(), p, []byte("metadata"))
w, err := Create(zaptest.NewLogger(t), p, []byte("metadata"))
if err != nil {
t.Fatal(err)
}
@ -735,7 +733,7 @@ func TestTailWriteNoSlackSpace(t *testing.T) {
w.Close()
// open, write more
w, err = Open(zap.NewExample(), p, walpb.Snapshot{})
w, err = Open(zaptest.NewLogger(t), p, walpb.Snapshot{})
if err != nil {
t.Fatal(err)
}
@ -756,7 +754,7 @@ func TestTailWriteNoSlackSpace(t *testing.T) {
w.Close()
// confirm all writes
w, err = Open(zap.NewExample(), p, walpb.Snapshot{})
w, err = Open(zaptest.NewLogger(t), p, walpb.Snapshot{})
if err != nil {
t.Fatal(err)
}
@ -784,7 +782,7 @@ func TestRestartCreateWal(t *testing.T) {
t.Fatal(err)
}
w, werr := Create(zap.NewExample(), p, []byte("abc"))
w, werr := Create(zaptest.NewLogger(t), p, []byte("abc"))
if werr != nil {
t.Fatal(werr)
}
@ -793,7 +791,7 @@ func TestRestartCreateWal(t *testing.T) {
t.Fatalf("got %q exists, expected it to not exist", tmpdir)
}
if w, err = OpenForRead(zap.NewExample(), p, walpb.Snapshot{}); err != nil {
if w, err = OpenForRead(zaptest.NewLogger(t), p, walpb.Snapshot{}); err != nil {
t.Fatal(err)
}
defer w.Close()
@ -810,7 +808,7 @@ func TestOpenOnTornWrite(t *testing.T) {
overwriteEntries := 5
p := t.TempDir()
w, err := Create(zap.NewExample(), p, nil)
w, err := Create(zaptest.NewLogger(t), p, nil)
defer func() {
if err = w.Close(); err != nil && err != os.ErrInvalid {
t.Fatal(err)
@ -852,7 +850,7 @@ func TestOpenOnTornWrite(t *testing.T) {
}
f.Close()
w, err = Open(zap.NewExample(), p, walpb.Snapshot{})
w, err = Open(zaptest.NewLogger(t), p, walpb.Snapshot{})
if err != nil {
t.Fatal(err)
}
@ -873,7 +871,7 @@ func TestOpenOnTornWrite(t *testing.T) {
w.Close()
// read back the entries, confirm number of entries matches expectation
w, err = OpenForRead(zap.NewExample(), p, walpb.Snapshot{})
w, err = OpenForRead(zaptest.NewLogger(t), p, walpb.Snapshot{})
if err != nil {
t.Fatal(err)
}
@ -902,7 +900,7 @@ func TestRenameFail(t *testing.T) {
os.RemoveAll(tp)
w := &WAL{
lg: zap.NewExample(),
lg: zaptest.NewLogger(t),
dir: p,
}
w2, werr := w.renameWAL(tp)
@ -916,7 +914,7 @@ func TestReadAllFail(t *testing.T) {
dir := t.TempDir()
// create initial WAL
f, err := Create(zap.NewExample(), dir, []byte("metadata"))
f, err := Create(zaptest.NewLogger(t), dir, []byte("metadata"))
if err != nil {
t.Fatal(err)
}
@ -940,7 +938,7 @@ func TestValidSnapshotEntries(t *testing.T) {
state2 := raftpb.HardState{Commit: 3, Term: 2}
snap4 := walpb.Snapshot{Index: 4, Term: 2, ConfState: &confState} // will be orphaned since the last committed entry will be snap3
func() {
w, err := Create(zap.NewExample(), p, nil)
w, err := Create(zaptest.NewLogger(t), p, nil)
if err != nil {
t.Fatal(err)
}
@ -966,7 +964,7 @@ func TestValidSnapshotEntries(t *testing.T) {
t.Fatal(err)
}
}()
walSnaps, err := ValidSnapshotEntries(zap.NewExample(), p)
walSnaps, err := ValidSnapshotEntries(zaptest.NewLogger(t), p)
if err != nil {
t.Fatal(err)
}
@ -992,7 +990,7 @@ func TestValidSnapshotEntriesAfterPurgeWal(t *testing.T) {
snap3 := walpb.Snapshot{Index: 3, Term: 2, ConfState: &confState}
state2 := raftpb.HardState{Commit: 3, Term: 2}
func() {
w, err := Create(zap.NewExample(), p, nil)
w, err := Create(zaptest.NewLogger(t), p, nil)
if err != nil {
t.Fatal(err)
}
@ -1023,7 +1021,7 @@ func TestValidSnapshotEntriesAfterPurgeWal(t *testing.T) {
t.Fatal(err)
}
os.Remove(p + "/" + files[0])
_, err = ValidSnapshotEntries(zap.NewExample(), p)
_, err = ValidSnapshotEntries(zaptest.NewLogger(t), p)
if err != nil {
t.Fatal(err)
}

View File

@ -20,8 +20,7 @@ import (
"testing"
"go.etcd.io/etcd/tests/v3/functional/rpcpb"
"go.uber.org/zap"
"go.uber.org/zap/zaptest"
)
func Test_read(t *testing.T) {
@ -262,10 +261,7 @@ func Test_read(t *testing.T) {
},
}
logger, err := zap.NewProduction()
if err != nil {
t.Fatal(err)
}
logger := zaptest.NewLogger(t)
defer logger.Sync()
cfg, err := read(logger, "../functional.yaml")

View File

@ -29,7 +29,7 @@ import (
"go.etcd.io/etcd/pkg/v3/pbutil"
"go.etcd.io/etcd/raft/v3/raftpb"
"go.etcd.io/etcd/server/v3/storage/wal"
"go.uber.org/zap"
"go.uber.org/zap/zaptest"
)
func TestEtcdDumpLogEntryType(t *testing.T) {
@ -57,7 +57,7 @@ func TestEtcdDumpLogEntryType(t *testing.T) {
waldir := walDir(p)
snapdir := snapDir(p)
w, err := wal.Create(zap.NewExample(), waldir, nil)
w, err := wal.Create(zaptest.NewLogger(t), waldir, nil)
if err != nil {
t.Fatal(err)
}