Compare commits


14 Commits

Author SHA1 Message Date
Gyuho Lee
9e079d8f02 version: 3.3.0-rc.2
Signed-off-by: Gyuho Lee <gyuhox@gmail.com>
2018-01-11 11:18:46 -08:00
Gyuho Lee
bd57c9ca5b etcd-tester: fix "writeTxn" key selection
Found when debugging https://github.com/coreos/etcd/issues/9130.

Signed-off-by: Gyuho Lee <gyuhox@gmail.com>
2018-01-11 11:18:05 -08:00
Gyu-Ho Lee
58c402a47b test: limit stress-qps for slow CI machines, add txn flags
Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2018-01-09 14:18:45 -08:00
Gyu-Ho Lee
3ce73b70bc etcd-tester: add txn stresser
Signed-off-by: Gyu-Ho Lee <gyuhox@gmail.com>
2018-01-09 14:18:33 -08:00
Gyuho Lee
ee3c81d8d3 ctlv3: add "snapshot restore --wal-dir"
Signed-off-by: Gyuho Lee <gyuhox@gmail.com>
2018-01-09 11:12:29 -08:00
Sahdev P. Zala
2dfabfbef6 DocCommand: use regex wildcard
The current command produces no output in the macOS terminal or in a bash shell.
Using a regex wildcard works on both macOS and Linux.
2018-01-09 09:11:16 -08:00
Gyuho Lee
bf83d5269f clientv3/integration: fix typos
Signed-off-by: Gyuho Lee <gyuhox@gmail.com>
2018-01-09 09:11:15 -08:00
Sam Batschelet
a609b1eb47 integration: add constant RequestWaitTimeout.
2018-01-09 09:11:15 -08:00
Iwasaki Yudai
1ae0c0b47d mvcc: check null before set FillPercent not to panic
Since CreateBucketIfNotExists() can return nil when it encounters an error,
FillPercent must be accessed only after a nil check, to avoid
a panic.
2018-01-08 13:08:03 -08:00
Sahdev P. Zala
ec43197344 etcdserver/api/v3rpc: debug user cancellation and log warning for rest
A context error with a cancel code typically indicates user cancellation, which
should be logged at debug level. For other error codes, a warning should be displayed.

Fixes #9085
2018-01-08 10:14:37 -08:00
Quentin MACHU
70ba0518f1 embed: enable extensive metrics if specified
2018-01-07 18:48:59 -08:00
Gyuho Lee
e330f5004f etcdmain: unset ETCD_UNSUPPORTED_ARCH after arch check
Signed-off-by: Gyuho Lee <gyuhox@gmail.com>
2018-01-05 03:38:35 +00:00
Gyuho Lee
0ec5023b7b pkg/expect: fix deadlock in mac OS
bufio.NewReader.ReadString blocks even
after the process has received syscall.SIGKILL.
Remove the ptyMu mutex and make ReadString return
when the *os.File is closed.

Signed-off-by: Gyuho Lee <gyuhox@gmail.com>
2018-01-02 14:34:01 -08:00
Gyuho Lee
0f69520622 version: bump up to 3.3.0-rc.1+git
Signed-off-by: Gyuho Lee <gyuhox@gmail.com>
2018-01-02 14:33:10 -08:00
23 changed files with 201 additions and 56 deletions

View File

@@ -87,7 +87,7 @@ Removing excessive keyspace data and defragmenting the backend database will put
```sh
# get current revision
-$ rev=$(ETCDCTL_API=3 etcdctl --endpoints=:2379 endpoint status --write-out="json" | egrep -o '"revision":[0-9]*' | egrep -o '[0-9]*')
+$ rev=$(ETCDCTL_API=3 etcdctl --endpoints=:2379 endpoint status --write-out="json" | egrep -o '"revision":[0-9]*' | egrep -o '[0-9].*')
# compact away all old revisions
$ ETCDCTL_API=3 etcdctl compact $rev
compacted revision 1516
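
The tweak above only loosens the second egrep pattern. A more portable way to pull the revision out of `endpoint status --write-out=json` is to decode the JSON; a minimal Go sketch (the output shape is an assumption based on that command's JSON format):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// statusOutput mirrors only the field we need from
// `etcdctl endpoint status --write-out=json` (shape assumed).
type statusOutput []struct {
	Status struct {
		Header struct {
			Revision int64 `json:"revision"`
		} `json:"header"`
	} `json:"Status"`
}

func main() {
	raw := []byte(`[{"Status":{"header":{"revision":1516}}}]`)
	var out statusOutput
	if err := json.Unmarshal(raw, &out); err != nil {
		panic(err)
	}
	fmt.Println(out[0].Status.Header.Revision) // 1516
}
```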

View File

@@ -54,7 +54,7 @@ func TestBalancerUnderBlackholeKeepAliveWatch(t *testing.T) {
	// TODO: only send healthy endpoint to gRPC so gRPC wont waste time to
	// dial for unhealthy endpoint.
	// then we can reduce 3s to 1s.
-	timeout := pingInterval + 3*time.Second
+	timeout := pingInterval + integration.RequestWaitTimeout
	cli, err := clientv3.New(ccfg)
	if err != nil {

View File

@@ -121,7 +121,7 @@ func testDialSetEndpoints(t *testing.T, setBefore bool) {
	if !setBefore {
		cli.SetEndpoints(eps[toKill%3], eps[(toKill+1)%3])
	}
-	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), integration.RequestWaitTimeout)
	if _, err = cli.Get(ctx, "foo", clientv3.WithSerializable()); err != nil {
		t.Fatal(err)
	}

View File

@@ -453,7 +453,7 @@ func TestKVGetErrConnClosed(t *testing.T) {
	clus.TakeClient(0)

	select {
-	case <-time.After(3 * time.Second):
+	case <-time.After(integration.RequestWaitTimeout):
		t.Fatal("kv.Get took too long")
	case <-donec:
	}
@@ -480,7 +480,7 @@ func TestKVNewAfterClose(t *testing.T) {
		close(donec)
	}()
	select {
-	case <-time.After(3 * time.Second):
+	case <-time.After(integration.RequestWaitTimeout):
		t.Fatal("kv.Get took too long")
	case <-donec:
	}

View File

@@ -299,7 +299,7 @@ func TestLeaseGrantErrConnClosed(t *testing.T) {
	}

	select {
-	case <-time.After(3 * time.Second):
+	case <-time.After(integration.RequestWaitTimeout):
		t.Fatal("le.Grant took too long")
	case <-donec:
	}
@@ -325,7 +325,7 @@ func TestLeaseGrantNewAfterClose(t *testing.T) {
		close(donec)
	}()
	select {
-	case <-time.After(3 * time.Second):
+	case <-time.After(integration.RequestWaitTimeout):
		t.Fatal("le.Grant took too long")
	case <-donec:
	}
@@ -357,7 +357,7 @@ func TestLeaseRevokeNewAfterClose(t *testing.T) {
		close(donec)
	}()
	select {
-	case <-time.After(3 * time.Second):
+	case <-time.After(integration.RequestWaitTimeout):
		t.Fatal("le.Revoke took too long")
	case <-donec:
	}

View File

@@ -234,7 +234,7 @@ func testBalancerUnderNetworkPartitionWatch(t *testing.T, isolateLeader bool) {
	wch := watchCli.Watch(clientv3.WithRequireLeader(context.Background()), "foo", clientv3.WithCreatedNotify())

	select {
	case <-wch:
-	case <-time.After(3 * time.Second):
+	case <-time.After(integration.RequestWaitTimeout):
		t.Fatal("took too long to create watch")
	}
@@ -252,7 +252,7 @@ func testBalancerUnderNetworkPartitionWatch(t *testing.T, isolateLeader bool) {
		if err = ev.Err(); err != rpctypes.ErrNoLeader {
			t.Fatalf("expected %v, got %v", rpctypes.ErrNoLeader, err)
		}
-	case <-time.After(3 * time.Second): // enough time to detect leader lost
+	case <-time.After(integration.RequestWaitTimeout): // enough time to detect leader lost
		t.Fatal("took too long to detect leader lost")
	}
}

View File

@@ -63,7 +63,7 @@ func TestBalancerUnderServerShutdownWatch(t *testing.T) {
	wch := watchCli.Watch(context.Background(), key, clientv3.WithCreatedNotify())

	select {
	case <-wch:
-	case <-time.After(3 * time.Second):
+	case <-time.After(integration.RequestWaitTimeout):
		t.Fatal("took too long to create watch")
	}
@@ -348,7 +348,7 @@ func testBalancerUnderServerStopInflightRangeOnRestart(t *testing.T, linearizabl
	clus.Members[target].Restart(t)

	select {
-	case <-time.After(clientTimeout + 3*time.Second):
+	case <-time.After(clientTimeout + integration.RequestWaitTimeout):
		t.Fatalf("timed out waiting for Get [linearizable: %v, opt: %+v]", linearizable, opt)
	case <-donec:
	}

View File

@@ -678,7 +678,7 @@ func TestWatchErrConnClosed(t *testing.T) {
	clus.TakeClient(0)

	select {
-	case <-time.After(3 * time.Second):
+	case <-time.After(integration.RequestWaitTimeout):
		t.Fatal("wc.Watch took too long")
	case <-donec:
	}
@@ -705,7 +705,7 @@ func TestWatchAfterClose(t *testing.T) {
		close(donec)
	}()
	select {
-	case <-time.After(3 * time.Second):
+	case <-time.After(integration.RequestWaitTimeout):
		t.Fatal("wc.Watch took too long")
	case <-donec:
	}
@@ -751,7 +751,7 @@ func TestWatchWithRequireLeader(t *testing.T) {
		if resp.Err() != rpctypes.ErrNoLeader {
			t.Fatalf("expected %v watch response error, got %+v", rpctypes.ErrNoLeader, resp)
		}
-	case <-time.After(3 * time.Second):
+	case <-time.After(integration.RequestWaitTimeout):
		t.Fatal("watch without leader took too long to close")
	}
@@ -760,7 +760,7 @@ func TestWatchWithRequireLeader(t *testing.T) {
		if ok {
			t.Fatalf("expected closed channel, got response %v", resp)
		}
-	case <-time.After(3 * time.Second):
+	case <-time.After(integration.RequestWaitTimeout):
		t.Fatal("waited too long for channel to close")
	}

View File

@@ -41,6 +41,7 @@ import (
	"github.com/coreos/etcd/rafthttp"
	"github.com/coreos/pkg/capnslog"
+	"github.com/grpc-ecosystem/go-grpc-prometheus"
	"github.com/soheilhy/cmux"
	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
@@ -523,6 +524,10 @@ func (e *Etcd) serveClients() (err error) {
}

func (e *Etcd) serveMetrics() (err error) {
+	if e.cfg.Metrics == "extensive" {
+		grpc_prometheus.EnableHandlingTimeHistogram()
+	}
+
	if len(e.cfg.ListenMetricsUrls) > 0 {
		metricsMux := http.NewServeMux()
		etcdhttp.HandleMetricsHealth(metricsMux, e.Server)
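
Moving the toggle from etcdmain into embed (see the etcdmain hunk below) means programs that embed etcd get the same behavior as the standalone binary. A minimal sketch of opting in through the embed config (metrics URL illustrative):

```go
package main

import (
	"log"
	"net/url"

	"github.com/coreos/etcd/embed"
)

func main() {
	cfg := embed.NewConfig()
	cfg.Dir = "default.etcd"
	// "extensive" makes serveMetrics call grpc_prometheus.EnableHandlingTimeHistogram().
	cfg.Metrics = "extensive"
	u, _ := url.Parse("http://127.0.0.1:2381")
	cfg.ListenMetricsUrls = []url.URL{*u}

	e, err := embed.StartEtcd(cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer e.Close()
	<-e.Server.ReadyNotify()
	log.Println("serving /metrics with RPC handling-time histograms")
}
```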

View File

@@ -874,6 +874,8 @@ The snapshot restore options closely resemble those used in the `etcd` command
 - data-dir -- Path to the data directory. Uses \<name\>.etcd if none given.
+- wal-dir -- Path to the WAL directory. Uses data directory if none given.
 - initial-cluster -- The initial cluster configuration for the restored etcd cluster.
 - initial-cluster-token -- Initial cluster token for the restored etcd cluster.

View File

@@ -56,6 +56,7 @@ var (
	restoreCluster      string
	restoreClusterToken string
	restoreDataDir      string
+	restoreWalDir       string
	restorePeerURLs     string
	restoreName         string
	skipHashCheck       bool
@@ -99,6 +100,7 @@ func NewSnapshotRestoreCommand() *cobra.Command {
		Run: snapshotRestoreCommandFunc,
	}
	cmd.Flags().StringVar(&restoreDataDir, "data-dir", "", "Path to the data directory")
+	cmd.Flags().StringVar(&restoreWalDir, "wal-dir", "", "Path to the WAL directory (use --data-dir if none given)")
	cmd.Flags().StringVar(&restoreCluster, "initial-cluster", initialClusterFromName(defaultName), "Initial cluster configuration for restore bootstrap")
	cmd.Flags().StringVar(&restoreClusterToken, "initial-cluster-token", "etcd-cluster", "Initial cluster token for the etcd cluster during restore bootstrap")
	cmd.Flags().StringVar(&restorePeerURLs, "initial-advertise-peer-urls", defaultInitialAdvertisePeerURLs, "List of this member's peer URLs to advertise to the rest of the cluster")
@@ -187,7 +189,10 @@ func snapshotRestoreCommandFunc(cmd *cobra.Command, args []string) {
		basedir = restoreName + ".etcd"
	}

-	waldir := filepath.Join(basedir, "member", "wal")
+	waldir := restoreWalDir
+	if waldir == "" {
+		waldir = filepath.Join(basedir, "member", "wal")
+	}
	snapdir := filepath.Join(basedir, "member", "snap")

	if _, err := os.Stat(basedir); err == nil {

View File

@@ -40,7 +40,6 @@ import (
	"github.com/coreos/etcd/version"
	"github.com/coreos/pkg/capnslog"
-	"github.com/grpc-ecosystem/go-grpc-prometheus"
	"google.golang.org/grpc"
)
@@ -179,10 +178,6 @@ func startEtcdOrProxyV2() {
// startEtcd runs StartEtcd in addition to hooks needed for standalone etcd.
func startEtcd(cfg *embed.Config) (<-chan struct{}, <-chan error, error) {
-	if cfg.Metrics == "extensive" {
-		grpc_prometheus.EnableHandlingTimeHistogram()
-	}
	e, err := embed.StartEtcd(cfg)
	if err != nil {
		return nil, nil, err
@@ -392,6 +387,9 @@ func checkSupportArch() {
	if runtime.GOARCH == "amd64" || runtime.GOARCH == "ppc64le" {
		return
	}
+	// unsupported arch only configured via environment variable
+	// so unset here to not parse through flag
+	defer os.Unsetenv("ETCD_UNSUPPORTED_ARCH")
	if env, ok := os.LookupEnv("ETCD_UNSUPPORTED_ARCH"); ok && env == runtime.GOARCH {
		plog.Warningf("running etcd on unsupported architecture %q since ETCD_UNSUPPORTED_ARCH is set", env)
		return

View File

@@ -107,7 +107,11 @@ func (ls *LeaseServer) leaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) erro
			return nil
		}
		if err != nil {
-			plog.Debugf("failed to receive lease keepalive request from gRPC stream (%q)", err.Error())
+			if isClientCtxErr(stream.Context().Err(), err) {
+				plog.Debugf("failed to receive lease keepalive request from gRPC stream (%q)", err.Error())
+			} else {
+				plog.Warningf("failed to receive lease keepalive request from gRPC stream (%q)", err.Error())
+			}
			return err
		}
@@ -133,7 +137,11 @@ func (ls *LeaseServer) leaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) erro
		resp.TTL = ttl
		err = stream.Send(resp)
		if err != nil {
-			plog.Debugf("failed to send lease keepalive response to gRPC stream (%q)", err.Error())
+			if isClientCtxErr(stream.Context().Err(), err) {
+				plog.Debugf("failed to send lease keepalive response to gRPC stream (%q)", err.Error())
+			} else {
+				plog.Warningf("failed to send lease keepalive response to gRPC stream (%q)", err.Error())
+			}
			return err
		}
	}

View File

@@ -81,3 +81,16 @@ func togRPCError(err error) error {
	}
	return grpcErr
}
+
+func isClientCtxErr(ctxErr error, err error) bool {
+	if ctxErr != nil {
+		return true
+	}
+	ev, ok := status.FromError(err)
+	if !ok {
+		return false
+	}
+	code := ev.Code()
+	return code == codes.Canceled || code == codes.DeadlineExceeded
+}
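
A quick illustration of what the helper classifies; a standalone sketch, with the helper copied from the hunk above and the sample errors fabricated with the grpc status package:

```go
package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// isClientCtxErr is copied from util.go above.
func isClientCtxErr(ctxErr error, err error) bool {
	if ctxErr != nil {
		return true
	}
	ev, ok := status.FromError(err)
	if !ok {
		return false
	}
	code := ev.Code()
	return code == codes.Canceled || code == codes.DeadlineExceeded
}

func main() {
	// Stream context carries an error: the client went away, so log at debug level.
	fmt.Println(isClientCtxErr(context.Canceled, status.Error(codes.Canceled, "canceled"))) // true
	// Cancel/deadline status codes count as client-side even without a context error.
	fmt.Println(isClientCtxErr(nil, status.Error(codes.DeadlineExceeded, "deadline exceeded"))) // true
	// Anything else (e.g. transport trouble) deserves a server-side warning.
	fmt.Println(isClientCtxErr(nil, status.Error(codes.Unavailable, "transport is closing"))) // false
}
```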

View File

@@ -140,7 +140,11 @@ func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) {
	// deadlock when calling sws.close().
	go func() {
		if rerr := sws.recvLoop(); rerr != nil {
-			plog.Debugf("failed to receive watch request from gRPC stream (%q)", rerr.Error())
+			if isClientCtxErr(stream.Context().Err(), rerr) {
+				plog.Debugf("failed to receive watch request from gRPC stream (%q)", rerr.Error())
+			} else {
+				plog.Warningf("failed to receive watch request from gRPC stream (%q)", rerr.Error())
+			}
			errc <- rerr
		}
	}()
@@ -339,7 +343,11 @@ func (sws *serverWatchStream) sendLoop() {
			mvcc.ReportEventReceived(len(evs))
			if err := sws.gRPCStream.Send(wr); err != nil {
-				plog.Debugf("failed to send watch response to gRPC stream (%q)", err.Error())
+				if isClientCtxErr(sws.gRPCStream.Context().Err(), err) {
+					plog.Debugf("failed to send watch response to gRPC stream (%q)", err.Error())
+				} else {
+					plog.Warningf("failed to send watch response to gRPC stream (%q)", err.Error())
+				}
				return
			}
@@ -356,7 +364,11 @@ func (sws *serverWatchStream) sendLoop() {
			}
			if err := sws.gRPCStream.Send(c); err != nil {
-				plog.Debugf("failed to send watch control response to gRPC stream (%q)", err.Error())
+				if isClientCtxErr(sws.gRPCStream.Context().Err(), err) {
+					plog.Debugf("failed to send watch control response to gRPC stream (%q)", err.Error())
+				} else {
+					plog.Warningf("failed to send watch control response to gRPC stream (%q)", err.Error())
+				}
				return
			}
@@ -372,7 +384,11 @@ func (sws *serverWatchStream) sendLoop() {
			for _, v := range pending[wid] {
				mvcc.ReportEventReceived(len(v.Events))
				if err := sws.gRPCStream.Send(v); err != nil {
-					plog.Debugf("failed to send pending watch response to gRPC stream (%q)", err.Error())
+					if isClientCtxErr(sws.gRPCStream.Context().Err(), err) {
+						plog.Debugf("failed to send pending watch response to gRPC stream (%q)", err.Error())
+					} else {
+						plog.Warningf("failed to send pending watch response to gRPC stream (%q)", err.Error())
+					}
					return
				}
			}

View File

@@ -58,10 +58,12 @@ import (
)

const (
-	tickDuration   = 10 * time.Millisecond
-	clusterName    = "etcd"
-	requestTimeout = 20 * time.Second
+	// RequestWaitTimeout is the time duration to wait for a request to go through or detect leader loss.
+	RequestWaitTimeout = 3 * time.Second
+	tickDuration       = 10 * time.Millisecond
+	requestTimeout     = 20 * time.Second
+	clusterName        = "etcd"

	basePort     = 21000
	UrlScheme    = "unix"
	UrlSchemeTLS = "unixs"

View File

@@ -373,10 +373,10 @@ func defragdb(odb, tmpdb *bolt.DB, limit int) error {
		}
		tmpb, berr := tmptx.CreateBucketIfNotExists(next)
-		tmpb.FillPercent = 0.9 // for seq write in for each
		if berr != nil {
			return berr
		}
+		tmpb.FillPercent = 0.9 // for seq write in for each

		b.ForEach(func(k, v []byte) error {
			count++
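
The corrected ordering as a standalone sketch against the bolt API (import path as vendored by etcd 3.3; the function name is illustrative):

```go
import bolt "github.com/coreos/bbolt"

// newFillBucket shows the safe ordering: CreateBucketIfNotExists returns a
// nil bucket whenever it returns an error, so the error must be checked
// before the bucket is touched.
func newFillBucket(tx *bolt.Tx, name []byte) (*bolt.Bucket, error) {
	b, err := tx.CreateBucketIfNotExists(name)
	if err != nil {
		return nil, err // b is nil here; b.FillPercent would panic
	}
	b.FillPercent = 0.9 // for sequential writes, as in defragdb
	return b, nil
}
```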

View File

@@ -33,7 +33,6 @@ type ExpectProcess struct {
	fpty *os.File
	wg   sync.WaitGroup

-	ptyMu sync.Mutex // protects accessing fpty
	cond  *sync.Cond // for broadcasting updates are available
	mu    sync.Mutex // protects lines and err
	lines []string
@@ -76,9 +75,7 @@ func (ep *ExpectProcess) read() {
	printDebugLines := os.Getenv("EXPECT_DEBUG") != ""
	r := bufio.NewReader(ep.fpty)
	for ep.err == nil {
-		ep.ptyMu.Lock()
		l, rerr := r.ReadString('\n')
-		ep.ptyMu.Unlock()
		ep.mu.Lock()
		ep.err = rerr
		if l != "" {
@@ -150,9 +147,7 @@ func (ep *ExpectProcess) close(kill bool) error {
	}
	err := ep.cmd.Wait()
-	ep.ptyMu.Lock()
	ep.fpty.Close()
-	ep.ptyMu.Unlock()
	ep.wg.Wait()

	if err != nil {
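
The fix relies on a property of *os.File: closing it from another goroutine makes a blocked read return instead of hanging, so no mutex is needed around ReadString. A minimal sketch of that behavior, using an OS pipe in place of the pty:

```go
package main

import (
	"bufio"
	"fmt"
	"os"
	"time"
)

func main() {
	r, w, err := os.Pipe()
	if err != nil {
		panic(err)
	}
	defer w.Close()

	// Close the read end while ReadString is blocked on it; the blocked
	// call returns with an error rather than deadlocking.
	go func() {
		time.Sleep(100 * time.Millisecond)
		r.Close()
	}()

	_, rerr := bufio.NewReader(r).ReadString('\n')
	fmt.Println(rerr) // e.g. "read |0: file already closed"
}
```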

View File

@@ -133,6 +133,9 @@ function functional_pass {
		-peer-ports 12380,22380,32380 \
		-limit 1 \
		-schedule-cases "0 1 2 3 4 5" \
+		-stress-qps 1000 \
+		-stress-key-txn-count 100 \
+		-stress-key-txn-ops 10 \
		-exit-on-failure && echo "'etcd-tester' succeeded"
	ETCD_TESTER_EXIT_CODE=$?
	echo "ETCD_TESTER_EXIT_CODE:" ${ETCD_TESTER_EXIT_CODE}

View File

@@ -34,9 +34,11 @@ import (
type keyStresser struct {
	Endpoint string

-	keyLargeSize   int
-	keySize        int
-	keySuffixRange int
+	keyLargeSize      int
+	keySize           int
+	keySuffixRange    int
+	keyTxnSuffixRange int
+	keyTxnOps         int

	N int
@@ -77,6 +79,15 @@ func (s *keyStresser) Stress() error {
		{weight: 0.07, f: newStressDelete(kvc, s.keySuffixRange)},
		{weight: 0.07, f: newStressDeleteInterval(kvc, s.keySuffixRange)},
	}
+	if s.keyTxnSuffixRange > 0 {
+		// adjust to make up ±70% of workloads with writes
+		stressEntries[0].weight = 0.24
+		stressEntries[1].weight = 0.24
+		stressEntries = append(stressEntries, stressEntry{
+			weight: 0.24,
+			f:      newStressTxn(kvc, s.keyTxnSuffixRange, s.keyTxnOps),
+		})
+	}
	s.stressTable = createStressTable(stressEntries)

	for i := 0; i < s.N; i++ {
@@ -202,6 +213,79 @@ func newStressPut(kvc pb.KVClient, keySuffixRange, keySize int) stressFunc {
	}
}

+func newStressTxn(kvc pb.KVClient, keyTxnSuffixRange, txnOps int) stressFunc {
+	keys := make([]string, keyTxnSuffixRange)
+	for i := range keys {
+		keys[i] = fmt.Sprintf("/k%03d", i)
+	}
+	return writeTxn(kvc, keys, txnOps)
+}
+
+func writeTxn(kvc pb.KVClient, keys []string, txnOps int) stressFunc {
+	return func(ctx context.Context) (error, int64) {
+		ks := make(map[string]struct{}, txnOps)
+		for len(ks) != txnOps {
+			ks[keys[rand.Intn(len(keys))]] = struct{}{}
+		}
+		selected := make([]string, 0, txnOps)
+		for k := range ks {
+			selected = append(selected, k)
+		}
+		com, delOp, putOp := getTxnReqs(selected[0], "bar00")
+		txnReq := &pb.TxnRequest{
+			Compare: []*pb.Compare{com},
+			Success: []*pb.RequestOp{delOp},
+			Failure: []*pb.RequestOp{putOp},
+		}
+
+		// add nested txns if any
+		for i := 1; i < txnOps; i++ {
+			k, v := selected[i], fmt.Sprintf("bar%02d", i)
+			com, delOp, putOp = getTxnReqs(k, v)
+			nested := &pb.RequestOp{
+				Request: &pb.RequestOp_RequestTxn{
+					RequestTxn: &pb.TxnRequest{
+						Compare: []*pb.Compare{com},
+						Success: []*pb.RequestOp{delOp},
+						Failure: []*pb.RequestOp{putOp},
+					},
+				},
+			}
+			txnReq.Success = append(txnReq.Success, nested)
+			txnReq.Failure = append(txnReq.Failure, nested)
+		}
+
+		_, err := kvc.Txn(ctx, txnReq, grpc.FailFast(false))
+		return err, int64(txnOps)
+	}
+}
+
+func getTxnReqs(key, val string) (com *pb.Compare, delOp *pb.RequestOp, putOp *pb.RequestOp) {
+	// if key exists (version > 0)
+	com = &pb.Compare{
+		Key:         []byte(key),
+		Target:      pb.Compare_VERSION,
+		Result:      pb.Compare_GREATER,
+		TargetUnion: &pb.Compare_Version{Version: 0},
+	}
+	delOp = &pb.RequestOp{
+		Request: &pb.RequestOp_RequestDeleteRange{
+			RequestDeleteRange: &pb.DeleteRangeRequest{
+				Key: []byte(key),
+			},
+		},
+	}
+	putOp = &pb.RequestOp{
+		Request: &pb.RequestOp_RequestPut{
+			RequestPut: &pb.PutRequest{
+				Key:   []byte(key),
+				Value: []byte(val),
+			},
+		},
+	}
+	return com, delOp, putOp
+}

func newStressRange(kvc pb.KVClient, keySuffixRange int) stressFunc {
	return func(ctx context.Context) (error, int64) {
		_, err := kvc.Range(ctx, &pb.RangeRequest{
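
In the weight adjustment above, the first two entries plus the new txn stresser each get weight 0.24, so writes make up roughly 3 × 0.24 ≈ 70% of the workload, matching the ± comment. For reference, the per-key transaction assembled by getTxnReqs is equivalent to this clientv3 sketch (client setup elided; key and value illustrative):

```go
import (
	"context"

	"github.com/coreos/etcd/clientv3"
)

// toggleKey: if the key already exists (version > 0), delete it;
// otherwise create it with the given value.
func toggleKey(ctx context.Context, cli *clientv3.Client, key, val string) error {
	_, err := cli.Txn(ctx).
		If(clientv3.Compare(clientv3.Version(key), ">", 0)).
		Then(clientv3.OpDelete(key)).
		Else(clientv3.OpPut(key, val)).
		Commit()
	return err
}
```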

View File

@@ -47,6 +47,8 @@ func main() {
	stressKeyLargeSize := flag.Uint("stress-key-large-size", 32*1024+1, "the size of each large key written into etcd.")
	stressKeySize := flag.Uint("stress-key-size", 100, "the size of each small key written into etcd.")
	stressKeySuffixRange := flag.Uint("stress-key-count", 250000, "the count of key range written into etcd.")
+	stressKeyTxnSuffixRange := flag.Uint("stress-key-txn-count", 100, "the count of key range written into etcd txn (max 100).")
+	stressKeyTxnOps := flag.Uint("stress-key-txn-ops", 1, "number of operations per a transaction (max 64).")
	limit := flag.Int("limit", -1, "the limit of rounds to run failure set (-1 to run without limits).")
	exitOnFailure := flag.Bool("exit-on-failure", false, "exit tester on first failure")
	stressQPS := flag.Int("stress-qps", 10000, "maximum number of stresser requests per second.")
@@ -120,15 +122,23 @@ func main() {
	}

	scfg := stressConfig{
-		rateLimiter:    rate.NewLimiter(rate.Limit(*stressQPS), *stressQPS),
-		keyLargeSize:   int(*stressKeyLargeSize),
-		keySize:        int(*stressKeySize),
-		keySuffixRange: int(*stressKeySuffixRange),
-		numLeases:      10,
-		keysPerLease:   10,
+		rateLimiter:       rate.NewLimiter(rate.Limit(*stressQPS), *stressQPS),
+		keyLargeSize:      int(*stressKeyLargeSize),
+		keySize:           int(*stressKeySize),
+		keySuffixRange:    int(*stressKeySuffixRange),
+		keyTxnSuffixRange: int(*stressKeyTxnSuffixRange),
+		keyTxnOps:         int(*stressKeyTxnOps),
+		numLeases:         10,
+		keysPerLease:      10,

		etcdRunnerPath: *etcdRunnerPath,
	}
+	if scfg.keyTxnSuffixRange > 100 {
+		plog.Fatalf("stress-key-txn-count is maximum 100, got %d", scfg.keyTxnSuffixRange)
+	}
+	if scfg.keyTxnOps > 64 {
+		plog.Fatalf("stress-key-txn-ops is maximum 64, got %d", scfg.keyTxnOps)
+	}

	t := &tester{
		failures: schedule,
View File

@@ -113,9 +113,11 @@ func (cs *compositeStresser) Checker() Checker {
}

type stressConfig struct {
-	keyLargeSize   int
-	keySize        int
-	keySuffixRange int
+	keyLargeSize      int
+	keySize           int
+	keySuffixRange    int
+	keyTxnSuffixRange int
+	keyTxnOps         int

	numLeases    int
	keysPerLease int
@@ -142,12 +144,14 @@ func NewStresser(s string, sc *stressConfig, m *member) Stresser {
		// TODO: Too intensive stressers can panic etcd member with
		// 'out of memory' error. Put rate limits in server side.
		return &keyStresser{
-			Endpoint:       m.grpcAddr(),
-			keyLargeSize:   sc.keyLargeSize,
-			keySize:        sc.keySize,
-			keySuffixRange: sc.keySuffixRange,
-			N:              100,
-			rateLimiter:    sc.rateLimiter,
+			Endpoint:          m.grpcAddr(),
+			keyLargeSize:      sc.keyLargeSize,
+			keySize:           sc.keySize,
+			keySuffixRange:    sc.keySuffixRange,
+			keyTxnSuffixRange: sc.keyTxnSuffixRange,
+			keyTxnOps:         sc.keyTxnOps,
+			N:                 100,
+			rateLimiter:       sc.rateLimiter,
		}
	case "v2keys":
		return &v2Stresser{

View File

@@ -26,7 +26,7 @@ import (
var (
	// MinClusterVersion is the min cluster version this etcd binary is compatible with.
	MinClusterVersion = "3.0.0"
-	Version           = "3.3.0-rc.1"
+	Version           = "3.3.0-rc.2"
	APIVersion        = "unknown"

	// Git SHA Value will be set during build