Merge pull request #9951 from gyuho/revive

*: integrate https://github.com/mgechev/revive with fmt tests (branch: release-3.4)

commit 0458c5d54b
@@ -472,7 +472,7 @@ func TestHTTPClusterClientDoDeadlineExceedContext(t *testing.T) {
 
 type fakeCancelContext struct{}
 
-var fakeCancelContextError = errors.New("fake context canceled")
+var errFakeCancelContext = errors.New("fake context canceled")
 
 func (f fakeCancelContext) Deadline() (time.Time, bool) { return time.Time{}, false }
 func (f fakeCancelContext) Done() <-chan struct{} {
@@ -480,7 +480,7 @@ func (f fakeCancelContext) Done() <-chan struct{} {
 	d <- struct{}{}
 	return d
 }
-func (f fakeCancelContext) Err() error { return fakeCancelContextError }
+func (f fakeCancelContext) Err() error { return errFakeCancelContext }
 func (f fakeCancelContext) Value(key interface{}) interface{} { return 1 }
 
 func withTimeout(parent context.Context, timeout time.Duration) (
@@ -512,8 +512,8 @@ func TestHTTPClusterClientDoCanceledContext(t *testing.T) {
 
 	select {
 	case err := <-errc:
-		if err != fakeCancelContextError {
-			t.Errorf("err = %+v, want %+v", err, fakeCancelContextError)
+		if err != errFakeCancelContext {
+			t.Errorf("err = %+v, want %+v", err, errFakeCancelContext)
 		}
 	case <-time.After(time.Second):
 		t.Fatalf("unexpected timeout when waiting for request to fake context canceled")
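
The renames in the three hunks above apply revive's error-naming rule: error values take an err prefix (Err when exported) rather than an Error suffix. A minimal sketch of the convention, with made-up names:

    package example

    import "errors"

    // var notFoundError = errors.New("not found") // flagged: "Error" suffix
    var errNotFound = errors.New("not found") // accepted: "err" prefix
    var ErrTimeout = errors.New("timeout")    // accepted: exported form
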
@@ -46,7 +46,7 @@ func TestV2NoRetryEOF(t *testing.T) {
 			conn.Close()
 		}
 	}()
-	eofURL := integration.UrlScheme + "://" + lEOF.Addr().String()
+	eofURL := integration.URLScheme + "://" + lEOF.Addr().String()
 	cli := integration.MustNewHTTPClient(t, []string{eofURL, eofURL}, nil)
 	kapi := client.NewKeysAPI(cli)
 	for i, f := range noRetryList(kapi) {
@@ -64,16 +64,16 @@ func TestV2NoRetryEOF(t *testing.T) {
 // TestV2NoRetryNoLeader tests destructive api calls won't retry if given an error code.
 func TestV2NoRetryNoLeader(t *testing.T) {
 	defer testutil.AfterTest(t)
-	lHttp := integration.NewListenerWithAddr(t, fmt.Sprintf("127.0.0.1:%05d", os.Getpid()))
+	lHTTP := integration.NewListenerWithAddr(t, fmt.Sprintf("127.0.0.1:%05d", os.Getpid()))
 	eh := &errHandler{errCode: http.StatusServiceUnavailable}
 	srv := httptest.NewUnstartedServer(eh)
-	defer lHttp.Close()
+	defer lHTTP.Close()
 	defer srv.Close()
-	srv.Listener = lHttp
+	srv.Listener = lHTTP
 	go srv.Start()
-	lHttpURL := integration.UrlScheme + "://" + lHttp.Addr().String()
+	lHTTPURL := integration.URLScheme + "://" + lHTTP.Addr().String()
 
-	cli := integration.MustNewHTTPClient(t, []string{lHttpURL, lHttpURL}, nil)
+	cli := integration.MustNewHTTPClient(t, []string{lHTTPURL, lHTTPURL}, nil)
 	kapi := client.NewKeysAPI(cli)
 	// test error code
 	for i, f := range noRetryList(kapi) {
@@ -94,7 +94,7 @@ func TestV2RetryRefuse(t *testing.T) {
 	cl.Launch(t)
 	defer cl.Terminate(t)
 	// test connection refused; expect no error failover
-	cli := integration.MustNewHTTPClient(t, []string{integration.UrlScheme + "://refuseconn:123", cl.URL(0)}, nil)
+	cli := integration.MustNewHTTPClient(t, []string{integration.URLScheme + "://refuseconn:123", cl.URL(0)}, nil)
 	kapi := client.NewKeysAPI(cli)
 	if _, err := kapi.Set(context.Background(), "/delkey", "def", nil); err != nil {
 		t.Fatal(err)
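
The UrlScheme → URLScheme and lHttp → lHTTP renames follow Go's initialism convention (revive's naming checks): acronyms such as URL and HTTP stay fully upper-case inside identifiers. A hypothetical sketch:

    package example

    // var eofUrl string // flagged: "Url" mixes case inside an initialism
    var eofURL string // accepted: initialisms such as URL, HTTP stay upper-case
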
@@ -157,7 +157,7 @@ func (b *builder) close(id string) {
 	b.mu.Unlock()
 }
 
-func (r *builder) Scheme() string {
+func (b *builder) Scheme() string {
 	return scheme
 }
 
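
This hunk applies revive's receiver-naming rule: every method on a type should use the same receiver name. A hypothetical sketch:

    package example

    type builder struct{}

    func (b *builder) close() {}

    // func (r *builder) Scheme() string { return "unix" } // flagged: "r" vs "b"
    func (b *builder) Scheme() string { return "unix" } // accepted: consistent receiver
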
@@ -39,7 +39,7 @@ func TestV3ClientMetrics(t *testing.T) {
 	defer testutil.AfterTest(t)
 
 	var (
-		addr string = "localhost:27989"
+		addr = "localhost:27989"
 		ln   net.Listener
 		err  error
 	)
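
revive's var-declaration rule drops type annotations that the initializer already determines. Sketch:

    package example

    var (
        // addr string = "localhost:27989" // flagged: type string is redundant
        addr = "localhost:27989" // accepted: type inferred from the literal
    )
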
@@ -295,7 +295,7 @@ func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAl
 	}
 	l.mu.Unlock()
 
-	go l.keepAliveCtxCloser(id, ctx, ka.donec)
+	go l.keepAliveCtxCloser(ctx, id, ka.donec)
 	l.firstKeepAliveOnce.Do(func() {
 		go l.recvKeepAliveLoop()
 		go l.deadlineLoop()
@@ -327,7 +327,7 @@ func (l *lessor) Close() error {
 	return nil
 }
 
-func (l *lessor) keepAliveCtxCloser(id LeaseID, ctx context.Context, donec <-chan struct{}) {
+func (l *lessor) keepAliveCtxCloser(ctx context.Context, id LeaseID, donec <-chan struct{}) {
 	select {
 	case <-donec:
 		return
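
Both lessor hunks satisfy revive's context-as-argument rule: a context.Context parameter belongs first in the signature, and the KeepAlive call site is reordered to match. A hypothetical sketch:

    package example

    import "context"

    // func closer(id int64, ctx context.Context) {} // flagged: ctx not first
    func closer(ctx context.Context, id int64) { // accepted: context comes first
        <-ctx.Done()
        _ = id
    }
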
@@ -47,7 +47,7 @@ var (
 	// client-side streaming retry limit, only applied to requests where server responds with
 	// a error code clearly indicating it was unable to process the request such as codes.Unavailable.
 	// If set to 0, retry is disabled.
-	defaultStreamMaxRetries uint = ^uint(0) // max uint
+	defaultStreamMaxRetries = uint(^uint(0)) // max uint
 
 	// client-side retry backoff wait between requests.
 	defaultBackoffWaitBetween = 25 * time.Millisecond
@@ -46,7 +46,7 @@ func (c *Client) unaryClientInterceptor(logger *zap.Logger, optFuncs ...retryOpt
 		}
 		var lastErr error
 		for attempt := uint(0); attempt < callOpts.max; attempt++ {
-			if err := waitRetryBackoff(attempt, ctx, callOpts); err != nil {
+			if err := waitRetryBackoff(ctx, attempt, callOpts); err != nil {
 				return err
 			}
 			lastErr = invoker(ctx, method, req, reply, cc, grpcOpts...)
@@ -173,7 +173,7 @@ func (s *serverStreamingRetryingStream) RecvMsg(m interface{}) error {
 	}
 	// We start off from attempt 1, because zeroth was already made on normal SendMsg().
 	for attempt := uint(1); attempt < s.callOpts.max; attempt++ {
-		if err := waitRetryBackoff(attempt, s.ctx, s.callOpts); err != nil {
+		if err := waitRetryBackoff(s.ctx, attempt, s.callOpts); err != nil {
 			return err
 		}
 		newStream, err := s.reestablishStreamAndResendBuffer(s.ctx)
@@ -243,8 +243,8 @@ func (s *serverStreamingRetryingStream) reestablishStreamAndResendBuffer(callCtx
 	return newStream, nil
 }
 
-func waitRetryBackoff(attempt uint, ctx context.Context, callOpts *options) error {
-	var waitTime time.Duration = 0
+func waitRetryBackoff(ctx context.Context, attempt uint, callOpts *options) error {
+	waitTime := time.Duration(0)
 	if attempt > 0 {
 		waitTime = callOpts.backoffFunc(attempt)
 	}
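
Besides the parameter reorder, `var waitTime time.Duration = 0` trips var-declaration because the explicit type plus zero initializer is redundant; `waitTime := time.Duration(0)` (used here) or a plain `var waitTime time.Duration` would both pass. Sketch:

    package example

    import "time"

    var a time.Duration // accepted: zero value is implied

    // var c time.Duration = 0 // flagged: redundant type and zero initializer

    func example() time.Duration {
        b := time.Duration(0) // accepted: the form chosen in this diff
        return a + b
    }
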
@@ -393,7 +393,7 @@ func (rc *raftNode) serveChannels() {
 
 	// send proposals over raft
 	go func() {
-		var confChangeCount uint64 = 0
+		confChangeCount := uint64(0)
 
 		for rc.proposeC != nil && rc.confChangeC != nil {
 			select {
@@ -409,7 +409,7 @@ func (rc *raftNode) serveChannels() {
 				if !ok {
 					rc.confChangeC = nil
 				} else {
-					confChangeCount += 1
+					confChangeCount++
 					cc.ID = confChangeCount
 					rc.node.ProposeConfChange(context.TODO(), cc)
 				}
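
revive's increment-decrement rule prefers the ++ and -- statements over += 1 and -= 1:

    package example

    func count() uint64 {
        n := uint64(0)
        // n += 1 // flagged by increment-decrement
        n++ // accepted
        return n
    }
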
@@ -86,11 +86,11 @@ func (s *simplePrinter) Grant(resp v3.LeaseGrantResponse) {
 	fmt.Printf("lease %016x granted with TTL(%ds)\n", resp.ID, resp.TTL)
 }
 
-func (p *simplePrinter) Revoke(id v3.LeaseID, r v3.LeaseRevokeResponse) {
+func (s *simplePrinter) Revoke(id v3.LeaseID, r v3.LeaseRevokeResponse) {
 	fmt.Printf("lease %016x revoked\n", id)
 }
 
-func (p *simplePrinter) KeepAlive(resp v3.LeaseKeepAliveResponse) {
+func (s *simplePrinter) KeepAlive(resp v3.LeaseKeepAliveResponse) {
 	fmt.Printf("lease %016x keepalived with TTL(%d)\n", resp.ID, resp.TTL)
 }
 
@@ -160,12 +160,12 @@ func NewStore(lg *zap.Logger, server doer, timeout time.Duration) Store {
 // passwordStore implements PasswordStore using bcrypt to hash user passwords
 type passwordStore struct{}
 
-func (_ passwordStore) CheckPassword(user User, password string) bool {
+func (passwordStore) CheckPassword(user User, password string) bool {
 	err := bcrypt.CompareHashAndPassword([]byte(user.Password), []byte(password))
 	return err == nil
 }
 
-func (_ passwordStore) HashPassword(password string) (string, error) {
+func (passwordStore) HashPassword(password string) (string, error) {
 	hash, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost)
 	return string(hash), err
 }
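
Dropping the `_` receiver name is also receiver-naming territory: an unused receiver should be omitted entirely rather than bound to the blank identifier. Sketch with a made-up type:

    package example

    type nopStore struct{}

    // func (_ nopStore) Ping() error { return nil } // flagged: blank receiver name
    func (nopStore) Ping() error { return nil } // accepted: receiver omitted entirely
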
@@ -30,7 +30,7 @@ import (
 
 type fakeDoer struct{}
 
-func (_ fakeDoer) Do(context.Context, etcdserverpb.Request) (etcdserver.Response, error) {
+func (fakeDoer) Do(context.Context, etcdserverpb.Request) (etcdserver.Response, error) {
 	return etcdserver.Response{}, nil
 }
 
@@ -372,11 +372,11 @@ func TestEnsure(t *testing.T) {
 type fastPasswordStore struct {
 }
 
-func (_ fastPasswordStore) CheckPassword(user User, password string) bool {
+func (fastPasswordStore) CheckPassword(user User, password string) bool {
 	return user.Password == password
 }
 
-func (_ fastPasswordStore) HashPassword(password string) (string, error) { return password, nil }
+func (fastPasswordStore) HashPassword(password string) (string, error) { return password, nil }
 
 func TestCreateAndUpdateUser(t *testing.T) {
 	olduser := `{"user": "cat", "roles" : ["animal"]}`
@@ -216,9 +216,8 @@ func (s *store) Set(nodePath string, dir bool, value string, expireOpts TTLOptio
 		if getErr != nil {
 			err = getErr
 			return nil, err
-		} else {
-			value = n.Value
 		}
+		value = n.Value
 	}
 
 	// Set new value
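
The store.Set hunk applies indent-error-flow: when the if branch ends in a return, the else block is dropped and its body outdented. A simplified sketch:

    package example

    import "errors"

    var errMissing = errors.New("missing")

    func get(ok bool) (string, error) {
        if !ok {
            return "", errMissing
        }
        // an "else { ... }" here would be flagged; the body is outdented instead
        return "value", nil
    }
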
@@ -68,8 +68,8 @@ const (
 
 	clusterName  = "etcd"
 	basePort     = 21000
-	UrlScheme    = "unix"
-	UrlSchemeTLS = "unixs"
+	URLScheme    = "unix"
+	URLSchemeTLS = "unixs"
 )
 
 var (
@@ -77,7 +77,7 @@ var (
 
 	// integration test uses unique ports, counting up, to listen for each
 	// member, ensuring restarted members can listen on the same port again.
-	localListenCount int64 = 0
+	localListenCount = int64(0)
 
 	testTLSInfo = transport.TLSInfo{
 		KeyFile: "./fixtures/server.key.insecure",
@@ -157,9 +157,9 @@ type cluster struct {
 
 func schemeFromTLSInfo(tls *transport.TLSInfo) string {
 	if tls == nil {
-		return UrlScheme
+		return URLScheme
 	}
-	return UrlSchemeTLS
+	return URLSchemeTLS
 }
 
 func (c *cluster) fillClusterForMembers() error {
@@ -18,6 +18,6 @@ package backend
 
 import bolt "github.com/coreos/bbolt"
 
-var boltOpenOptions *bolt.Options = nil
+var boltOpenOptions *bolt.Options
 
 func (bcfg *BackendConfig) mmapSize() int { return int(bcfg.MmapSize) }
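
Assigning nil to a pointer declaration is redundant, since nil is already the zero value; var-declaration flags the explicit initializer. Sketch:

    package example

    type options struct{}

    // var opts *options = nil // flagged: nil is already the zero value
    var opts *options // accepted
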
@@ -324,22 +324,22 @@ func (ki *keyIndex) findGeneration(rev int64) *generation {
 	return nil
 }
 
-func (a *keyIndex) Less(b btree.Item) bool {
-	return bytes.Compare(a.key, b.(*keyIndex).key) == -1
+func (ki *keyIndex) Less(b btree.Item) bool {
+	return bytes.Compare(ki.key, b.(*keyIndex).key) == -1
 }
 
-func (a *keyIndex) equal(b *keyIndex) bool {
-	if !bytes.Equal(a.key, b.key) {
+func (ki *keyIndex) equal(b *keyIndex) bool {
+	if !bytes.Equal(ki.key, b.key) {
 		return false
 	}
-	if a.modified != b.modified {
+	if ki.modified != b.modified {
 		return false
 	}
-	if len(a.generations) != len(b.generations) {
+	if len(ki.generations) != len(b.generations) {
 		return false
 	}
-	for i := range a.generations {
-		ag, bg := a.generations[i], b.generations[i]
+	for i := range ki.generations {
+		ag, bg := ki.generations[i], b.generations[i]
 		if !ag.equal(bg) {
 			return false
 		}
@@ -384,16 +384,16 @@ func (g *generation) String() string {
 	return fmt.Sprintf("g: created[%d] ver[%d], revs %#v\n", g.created, g.ver, g.revs)
 }
 
-func (a generation) equal(b generation) bool {
-	if a.ver != b.ver {
+func (g generation) equal(b generation) bool {
+	if g.ver != b.ver {
 		return false
 	}
-	if len(a.revs) != len(b.revs) {
+	if len(g.revs) != len(b.revs) {
 		return false
 	}
 
-	for i := range a.revs {
-		ar, br := a.revs[i], b.revs[i]
+	for i := range g.revs {
+		ar, br := g.revs[i], b.revs[i]
 		if ar != br {
 			return false
 		}
@@ -81,12 +81,12 @@ func (x *intervalNode) color() rbcolor {
 	return x.c
 }
 
-func (n *intervalNode) height() int {
-	if n == nil {
+func (x *intervalNode) height() int {
+	if x == nil {
 		return 0
 	}
-	ld := n.left.height()
-	rd := n.right.height()
+	ld := x.left.height()
+	rd := x.right.height()
 	if ld < rd {
 		return rd + 1
 	}
@@ -27,7 +27,7 @@ var byteOrder binary.ByteOrder
 func ByteOrder() binary.ByteOrder { return byteOrder }
 
 func init() {
-	var i int = 0x1
+	i := int(0x1)
 	if v := (*[intWidth]byte)(unsafe.Pointer(&i)); v[0] == 0 {
 		byteOrder = binary.BigEndian
 	} else {
@@ -34,7 +34,7 @@ func getAuthTokenFromClient(ctx context.Context) string {
 	return ""
 }
 
-func withClientAuthToken(ctx context.Context, ctxWithToken context.Context) context.Context {
+func withClientAuthToken(ctx, ctxWithToken context.Context) context.Context {
 	token := getAuthTokenFromClient(ctxWithToken)
 	if token != "" {
 		ctx = context.WithValue(ctx, rpctypes.TokenFieldNameGRPC, token)
@@ -511,6 +511,18 @@ function staticcheck_pass {
 	fi
 }
 
+function revive_pass {
+	if which revive >/dev/null; then
+		reviveResult=$(revive -config ./tests/revive.toml -exclude "vendor/..." ./... 2>&1 || true)
+		if [ -n "${reviveResult}" ]; then
+			echo -e "revive checking failed:\\n${reviveResult}"
+			exit 255
+		fi
+	else
+		echo "Skipping revive..."
+	fi
+}
+
 function unconvert_pass {
 	if which unconvert >/dev/null; then
 		unconvertResult=$(unconvert -v "${STATIC_ANALYSIS_PATHS[@]}" 2>&1 || true)
@@ -615,6 +627,7 @@ function fmt_pass {
 	  unused \
 	  unparam \
 	  staticcheck \
+	  revive \
 	  unconvert \
 	  ineffassign \
 	  nakedret \
@@ -46,6 +46,7 @@ ADD ./scripts/install-marker.sh /tmp/install-marker.sh
 
 RUN go get -v -u -tags spell github.com/chzchzchz/goword \
   && go get -v -u github.com/coreos/license-bill-of-materials \
+  && go get -v -u github.com/mgechev/revive \
   && go get -v -u github.com/mdempsky/unconvert \
   && go get -v -u mvdan.cc/unparam \
   && go get -v -u honnef.co/go/tools/cmd/gosimple \
@@ -148,10 +148,7 @@ func leaseTestGrantLeasesList(cx ctlCtx) error {
 	if err != nil {
 		return fmt.Errorf("lease id not in returned list (%v)", err)
 	}
-	if err = proc.Close(); err != nil {
-		return err
-	}
-	return nil
+	return proc.Close()
 }
 
 func leaseTestTimeToLiveExpired(cx ctlCtx) {
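
This hunk is revive's if-return rule: an `if err := f(); err != nil { return err }` immediately followed by `return nil` collapses into `return f()`. Sketch:

    package example

    import "os"

    func closeFile(f *os.File) error {
        // if err := f.Close(); err != nil { // flagged pattern
        //     return err
        // }
        // return nil
        return f.Close() // accepted: collapse to a single return
    }
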
@@ -0,0 +1,38 @@
+ignoreGeneratedHeader = false
+severity = "warning"
+confidence = 0.8
+errorCode = 0
+warningCode = 0
+
+[rule.blank-imports]
+[rule.context-as-argument]
+[rule.dot-imports]
+[rule.error-return]
+[rule.error-naming]
+[rule.if-return]
+[rule.increment-decrement]
+[rule.var-declaration]
+[rule.package-comments]
+[rule.range]
+[rule.receiver-naming]
+[rule.time-naming]
+[rule.indent-error-flow]
+[rule.errorf]
+
+
+# TODO: enable following
+
+# grpcproxy context.WithValue(ctx, rpctypes.TokenFieldNameGRPC, token)
+# [rule.context-keys-type]
+
+# punctuation in error value
+# [rule.error-strings]
+
+# underscore variables
+# [rule.var-naming]
+
+# godoc
+# [rule.exported]
+
+# return unexported type
+# [rule.unexported-return]
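
The commented-out context-keys-type rule would require context keys to be custom types rather than built-ins such as string; the grpcproxy call noted in the config uses a string-like key, so the rule stays disabled for now. A hypothetical sketch of what it enforces:

    package example

    import "context"

    type tokenKey struct{} // custom key type, what context-keys-type wants

    func attach(ctx context.Context, token string) context.Context {
        // return context.WithValue(ctx, "token", token) // flagged: built-in key type
        return context.WithValue(ctx, tokenKey{}, token) // accepted
    }
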
@@ -153,10 +153,7 @@ func TestRepairWriteTearLast(t *testing.T) {
 		if terr := f.Truncate(1024); terr != nil {
 			return terr
 		}
-		if terr := f.Truncate(offset); terr != nil {
-			return terr
-		}
-		return nil
+		return f.Truncate(offset)
 	}
 	testRepair(t, makeEnts(50), corruptf, 40)
 }
@@ -273,7 +273,7 @@ func TestSaveWithCut(t *testing.T) {
 	const EntrySize int = 500
 	SegmentSizeBytes = 2 * 1024
 	defer func() { SegmentSizeBytes = restoreLater }()
-	var index uint64 = 0
+	index := uint64(0)
 	for totalSize := 0; totalSize < int(SegmentSizeBytes); totalSize += EntrySize {
 		ents := []raftpb.Entry{{Index: index, Term: 1, Data: bigData}}
 		if err = w.Save(state, ents); err != nil {