Merge pull request #15615 from serathius/robustness-snapshot-older-version

tests/robustness: Support running snapshot tests on older versions
storage-doc
Marek Siarkowicz 2023-04-03 12:13:01 +02:00 committed by GitHub
commit 7c7f636aea
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
7 changed files with 82 additions and 71 deletions

View File

@ -49,12 +49,12 @@ func testDowngradeUpgrade(t *testing.T, clusterSize int) {
t.Skipf("%q does not exist", lastReleaseBinary)
}
currentVersion, err := getVersionFromBinary(currentEtcdBinary)
currentVersion, err := e2e.GetVersionFromBinary(currentEtcdBinary)
require.NoError(t, err)
// wipe any pre-release suffix like -alpha.0 we see commonly in builds
currentVersion.PreRelease = ""
lastVersion, err := getVersionFromBinary(lastReleaseBinary)
lastVersion, err := e2e.GetVersionFromBinary(lastReleaseBinary)
require.NoError(t, err)
require.Equalf(t, lastVersion.Minor, currentVersion.Minor-1, "unexpected minor version difference")
@ -245,19 +245,3 @@ func getMemberVersionByCurl(cfg *e2e.EtcdProcessClusterConfig, member e2e.EtcdPr
}
return result, nil
}
// getVersionFromBinary runs `<binaryPath> --version` and parses the semver
// version out of the "etcd Version:" line of the output.
func getVersionFromBinary(binaryPath string) (*semver.Version, error) {
	const versionPrefix = "etcd Version:"
	output, err := e2e.RunUtilCompletion([]string{binaryPath, "--version"}, nil)
	if err != nil {
		return nil, fmt.Errorf("could not find binary version from %s, err: %w", binaryPath, err)
	}
	for _, outputLine := range output {
		if !strings.HasPrefix(outputLine, versionPrefix) {
			continue
		}
		// Everything after the first colon is the version string.
		raw := strings.SplitAfter(outputLine, ":")[1]
		return semver.NewVersion(strings.TrimSpace(raw))
	}
	return nil, fmt.Errorf("could not find version in binary output of %s, lines outputted were %v", binaryPath, output)
}

View File

@ -112,7 +112,7 @@ func TestV2DeprecationSnapshotMatches(t *testing.T) {
if !fileutil.Exist(e2e.BinPath.EtcdLastRelease) {
t.Skipf("%q does not exist", e2e.BinPath.EtcdLastRelease)
}
snapshotCount := 10
var snapshotCount uint64 = 10
epc := runEtcdAndCreateSnapshot(t, e2e.LastVersion, lastReleaseData, snapshotCount)
oldMemberDataDir := epc.Procs[0].Config().DataDirPath
cc1, err := e2e.NewEtcdctl(epc.Cfg.Client, epc.EndpointsV3())
@ -182,7 +182,7 @@ func TestV2DeprecationSnapshotRecover(t *testing.T) {
assert.NoError(t, epc.Close())
}
func runEtcdAndCreateSnapshot(t testing.TB, serverVersion e2e.ClusterVersion, dataDir string, snapshotCount int) *e2e.EtcdProcessCluster {
func runEtcdAndCreateSnapshot(t testing.TB, serverVersion e2e.ClusterVersion, dataDir string, snapshotCount uint64) *e2e.EtcdProcessCluster {
cfg := e2e.ConfigStandalone(*e2e.NewConfig(
e2e.WithVersion(serverVersion),
e2e.WithDataDirPath(dataDir),
@ -194,9 +194,10 @@ func runEtcdAndCreateSnapshot(t testing.TB, serverVersion e2e.ClusterVersion, da
return epc
}
func addAndRemoveKeysAndMembers(ctx context.Context, t testing.TB, cc *e2e.EtcdctlV3, snapshotCount int) (members []uint64) {
func addAndRemoveKeysAndMembers(ctx context.Context, t testing.TB, cc *e2e.EtcdctlV3, snapshotCount uint64) (members []uint64) {
// Execute some non-trivial key&member operation
for i := 0; i < snapshotCount*3; i++ {
var i uint64
for i = 0; i < snapshotCount*3; i++ {
err := cc.Put(ctx, fmt.Sprintf("%d", i), "1", config.PutOptions{})
assert.NoError(t, err)
}
@ -204,14 +205,14 @@ func addAndRemoveKeysAndMembers(ctx context.Context, t testing.TB, cc *e2e.Etcdc
assert.NoError(t, err)
members = append(members, member1.Member.ID)
for i := 0; i < snapshotCount*2; i++ {
for i = 0; i < snapshotCount*2; i++ {
_, err = cc.Delete(ctx, fmt.Sprintf("%d", i), config.DeleteOptions{})
assert.NoError(t, err)
}
_, err = cc.MemberRemove(ctx, member1.Member.ID)
assert.NoError(t, err)
for i := 0; i < snapshotCount; i++ {
for i = 0; i < snapshotCount; i++ {
err = cc.Put(ctx, fmt.Sprintf("%d", i), "2", config.PutOptions{})
assert.NoError(t, err)
}
@ -219,7 +220,7 @@ func addAndRemoveKeysAndMembers(ctx context.Context, t testing.TB, cc *e2e.Etcdc
assert.NoError(t, err)
members = append(members, member2.Member.ID)
for i := 0; i < snapshotCount/2; i++ {
for i = 0; i < snapshotCount/2; i++ {
err = cc.Put(ctx, fmt.Sprintf("%d", i), "3", config.PutOptions{})
assert.NoError(t, err)
}

View File

@ -35,7 +35,7 @@ type ClusterConfig struct {
QuotaBackendBytes int64
StrictReconfigCheck bool
AuthToken string
SnapshotCount int
SnapshotCount uint64
// ClusterContext is used by "e2e" or "integration" to extend the
// ClusterConfig. The common test cases shouldn't care about what
@ -81,7 +81,7 @@ func WithQuotaBackendBytes(bytes int64) ClusterOption {
return func(c *ClusterConfig) { c.QuotaBackendBytes = bytes }
}
func WithSnapshotCount(count int) ClusterOption {
func WithSnapshotCount(count uint64) ClusterOption {
return func(c *ClusterConfig) { c.SnapshotCount = count }
}

View File

@ -148,8 +148,8 @@ type EtcdProcessClusterConfig struct {
MetricsURLScheme string
SnapshotCount int // default is 10000
SnapshotCatchUpEntries int // default is 5000
SnapshotCount uint64
SnapshotCatchUpEntries uint64
Client ClientConfig
ClientHttpSeparate bool
@ -195,6 +195,9 @@ func DefaultConfig() *EtcdProcessClusterConfig {
InitialToken: "new",
StrictReconfigCheck: true,
CN: true,
SnapshotCount: etcdserver.DefaultSnapshotCount,
SnapshotCatchUpEntries: etcdserver.DefaultSnapshotCatchUpEntries,
}
}
@ -224,11 +227,11 @@ func WithKeepDataDir(keep bool) EPClusterOption {
return func(c *EtcdProcessClusterConfig) { c.KeepDataDir = keep }
}
func WithSnapshotCount(count int) EPClusterOption {
func WithSnapshotCount(count uint64) EPClusterOption {
return func(c *EtcdProcessClusterConfig) { c.SnapshotCount = count }
}
func WithSnapshotCatchUpEntries(count int) EPClusterOption {
func WithSnapshotCatchUpEntries(count uint64) EPClusterOption {
return func(c *EtcdProcessClusterConfig) { c.SnapshotCatchUpEntries = count }
}
@ -588,7 +591,7 @@ func (cfg *EtcdProcessClusterConfig) EtcdServerProcessConfig(tb testing.TB, i in
if cfg.WatchProcessNotifyInterval != 0 {
args = append(args, "--experimental-watch-progress-notify-interval", cfg.WatchProcessNotifyInterval.String())
}
if cfg.SnapshotCatchUpEntries > 0 {
if cfg.SnapshotCatchUpEntries != etcdserver.DefaultSnapshotCatchUpEntries {
if cfg.Version == CurrentVersion || (cfg.Version == MinorityLastVersion && i <= cfg.ClusterSize/2) || (cfg.Version == QuorumLastVersion && i > cfg.ClusterSize/2) {
args = append(args, "--experimental-snapshot-catchup-entries", fmt.Sprintf("%d", cfg.SnapshotCatchUpEntries))
}

View File

@ -27,6 +27,7 @@ import (
"testing"
"time"
"github.com/coreos/go-semver/semver"
"go.uber.org/zap"
"go.etcd.io/etcd/client/pkg/v3/fileutil"
@ -357,3 +358,19 @@ func fetchFailpoints(member EtcdProcess) (map[string]struct{}, error) {
}
return failpoints, nil
}
// GetVersionFromBinary returns the etcd server version reported by the
// binary at binaryPath, obtained by executing `binaryPath --version` and
// parsing the "etcd Version:" line of its output. It returns an error if
// the binary cannot be executed or no version line is found.
func GetVersionFromBinary(binaryPath string) (*semver.Version, error) {
	lines, err := RunUtilCompletion([]string{binaryPath, "--version"}, nil)
	if err != nil {
		return nil, fmt.Errorf("could not find binary version from %s, err: %w", binaryPath, err)
	}
	const prefix = "etcd Version:"
	for _, line := range lines {
		if strings.HasPrefix(line, prefix) {
			// TrimPrefix is the direct idiom here; SplitAfter(line, ":")[1]
			// would only capture text up to a second colon.
			versionString := strings.TrimSpace(strings.TrimPrefix(line, prefix))
			return semver.NewVersion(versionString)
		}
	}
	return nil, fmt.Errorf("could not find version in binary output of %s, lines outputted were %v", binaryPath, lines)
}

View File

@ -29,7 +29,7 @@ import (
)
const (
triggerTimeout = 10 * time.Second
triggerTimeout = 30 * time.Second
)
var (

View File

@ -24,6 +24,7 @@ import (
"go.uber.org/zap/zaptest"
"golang.org/x/sync/errgroup"
"go.etcd.io/etcd/api/v3/version"
"go.etcd.io/etcd/tests/v3/framework/e2e"
"go.etcd.io/etcd/tests/v3/robustness/model"
)
@ -78,6 +79,10 @@ var (
func TestRobustness(t *testing.T) {
testRunner.BeforeTest(t)
v, err := e2e.GetVersionFromBinary(e2e.BinPath.Etcd)
if err != nil {
t.Fatalf("Failed checking etcd version binary, binary: %q, err: %v", e2e.BinPath.Etcd, err)
}
type scenario struct {
name string
failpoint Failpoint
@ -112,44 +117,45 @@ func TestRobustness(t *testing.T) {
),
})
}
scenarios = append(scenarios, []scenario{
{
name: "Issue14370",
failpoint: RaftBeforeSavePanic,
config: *e2e.NewConfig(
e2e.WithClusterSize(1),
e2e.WithGoFailEnabled(true),
),
},
{
name: "Issue14685",
failpoint: DefragBeforeCopyPanic,
config: *e2e.NewConfig(
e2e.WithClusterSize(1),
e2e.WithGoFailEnabled(true),
),
},
{
name: "Issue13766",
failpoint: KillFailpoint,
traffic: &HighTraffic,
config: *e2e.NewConfig(
e2e.WithSnapshotCount(100),
),
},
{
name: "Snapshot",
failpoint: RandomSnapshotFailpoint,
traffic: &HighTraffic,
config: *e2e.NewConfig(
e2e.WithGoFailEnabled(true),
e2e.WithSnapshotCount(100),
e2e.WithSnapshotCatchUpEntries(100),
e2e.WithPeerProxy(true),
e2e.WithIsPeerTLS(true),
),
},
}...)
scenarios = append(scenarios, scenario{
name: "Issue14370",
failpoint: RaftBeforeSavePanic,
config: *e2e.NewConfig(
e2e.WithClusterSize(1),
e2e.WithGoFailEnabled(true),
),
})
scenarios = append(scenarios, scenario{
name: "Issue14685",
failpoint: DefragBeforeCopyPanic,
config: *e2e.NewConfig(
e2e.WithClusterSize(1),
e2e.WithGoFailEnabled(true),
),
})
scenarios = append(scenarios, scenario{
name: "Issue13766",
failpoint: KillFailpoint,
traffic: &HighTraffic,
config: *e2e.NewConfig(
e2e.WithSnapshotCount(100),
),
})
snapshotOptions := []e2e.EPClusterOption{
e2e.WithGoFailEnabled(true),
e2e.WithSnapshotCount(100),
e2e.WithPeerProxy(true),
e2e.WithIsPeerTLS(true),
}
if !v.LessThan(version.V3_6) {
snapshotOptions = append(snapshotOptions, e2e.WithSnapshotCatchUpEntries(100))
}
scenarios = append(scenarios, scenario{
name: "Snapshot",
failpoint: RandomSnapshotFailpoint,
traffic: &HighTraffic,
config: *e2e.NewConfig(snapshotOptions...),
})
for _, scenario := range scenarios {
if scenario.traffic == nil {
scenario.traffic = &defaultTraffic