integration: add constant RequestWaitTimeout.

release-3.4
Sam Batschelet 2017-12-09 13:16:27 -05:00
parent c699470d3b
commit d21fef2d41
8 changed files with 20 additions and 18 deletions
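Every hunk below makes the same substitution: a hard-coded 3*time.Second wait in the clientv3 integration tests becomes the new exported constant integration.RequestWaitTimeout. A minimal sketch of the resulting test pattern, assuming the pre-module import path github.com/coreos/etcd/integration used on this branch (the test name, channel, and goroutine body are illustrative, not taken from the diff):

package integration_test

import (
	"testing"
	"time"

	"github.com/coreos/etcd/integration"
)

// TestRequestCompletes shows the select/time.After pattern this commit
// standardizes on: wait for the operation's done channel, but fail the
// test if integration.RequestWaitTimeout (3s) elapses first.
func TestRequestCompletes(t *testing.T) {
	donec := make(chan struct{})
	go func() {
		// issue the client request under test here, then signal completion
		close(donec)
	}()
	select {
	case <-donec:
		// request went through within the shared timeout budget
	case <-time.After(integration.RequestWaitTimeout):
		t.Fatal("request took too long")
	}
}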

View File

@@ -54,7 +54,7 @@ func TestBalancerUnderBlackholeKeepAliveWatch(t *testing.T) {
 	// TODO: only send healthy endpoint to gRPC so gRPC won't waste time to
 	// dial for unhealthy endpoint.
 	// then we can reduce 3s to 1s.
-	timeout := pingInterval + 3*time.Second
+	timeout := pingInterval + integration.RequestWaitTimeout
 	cli, err := clientv3.New(ccfg)
 	if err != nil {

View File

@@ -121,7 +121,7 @@ func testDialSetEndpoints(t *testing.T, setBefore bool) {
 	if !setBefore {
 		cli.SetEndpoints(eps[toKill%3], eps[(toKill+1)%3])
 	}
-	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), integration.RequestWaitTimeout)
 	if _, err = cli.Get(ctx, "foo", clientv3.WithSerializable()); err != nil {
 		t.Fatal(err)
 	}

View File

@@ -453,7 +453,7 @@ func TestKVGetErrConnClosed(t *testing.T) {
 	clus.TakeClient(0)
 	select {
-	case <-time.After(3 * time.Second):
+	case <-time.After(integration.RequestWaitTimeout):
 		t.Fatal("kv.Get took too long")
 	case <-donec:
 	}
@@ -480,7 +480,7 @@ func TestKVNewAfterClose(t *testing.T) {
 		close(donec)
 	}()
 	select {
-	case <-time.After(3 * time.Second):
+	case <-time.After(integration.RequestWaitTimeout):
 		t.Fatal("kv.Get took too long")
 	case <-donec:
 	}

View File

@@ -299,7 +299,7 @@ func TestLeaseGrantErrConnClosed(t *testing.T) {
 	}
 	select {
-	case <-time.After(3 * time.Second):
+	case <-time.After(integration.RequestWaitTimeout):
 		t.Fatal("le.Grant took too long")
 	case <-donec:
 	}
@@ -325,7 +325,7 @@ func TestLeaseGrantNewAfterClose(t *testing.T) {
 		close(donec)
 	}()
 	select {
-	case <-time.After(3 * time.Second):
+	case <-time.After(integration.RequestWaitTimeout):
 		t.Fatal("le.Grant took too long")
 	case <-donec:
 	}
@@ -357,7 +357,7 @@ func TestLeaseRevokeNewAfterClose(t *testing.T) {
 		close(donec)
 	}()
 	select {
-	case <-time.After(3 * time.Second):
+	case <-time.After(integration.RequestWaitTimeout):
 		t.Fatal("le.Revoke took too long")
 	case <-donec:
 	}

View File

@@ -234,7 +234,7 @@ func testBalancerUnderNetworkPartitionWatch(t *testing.T, isolateLeader bool) {
 	wch := watchCli.Watch(clientv3.WithRequireLeader(context.Background()), "foo", clientv3.WithCreatedNotify())
 	select {
 	case <-wch:
-	case <-time.After(3 * time.Second):
+	case <-time.After(integration.RequestWaitTimeout):
 		t.Fatal("took too long to create watch")
 	}
@@ -252,7 +252,7 @@ func testBalancerUnderNetworkPartitionWatch(t *testing.T, isolateLeader bool) {
 		if err = ev.Err(); err != rpctypes.ErrNoLeader {
 			t.Fatalf("expected %v, got %v", rpctypes.ErrNoLeader, err)
 		}
-	case <-time.After(3 * time.Second): // enough time to detect leader lost
+	case <-time.After(integration.RequestWaitTimeout): // enough time to detect leader lost
 		t.Fatal("took too long to detect leader lost")
 	}
 }

View File

@@ -63,7 +63,7 @@ func TestBalancerUnderServerShutdownWatch(t *testing.T) {
 	wch := watchCli.Watch(context.Background(), key, clientv3.WithCreatedNotify())
 	select {
 	case <-wch:
-	case <-time.After(3 * time.Second):
+	case <-time.After(integration.RequestWaitTimeout):
 		t.Fatal("took too long to create watch")
 	}
@@ -348,7 +348,7 @@ func testBalancerUnderServerStopInflightRangeOnRestart(t *testing.T, linearizabl
 	clus.Members[target].Restart(t)
 	select {
-	case <-time.After(clientTimeout + 3*time.Second):
+	case <-time.After(clientTimeout + integration.RequestWaitTimeout):
 		t.Fatalf("timed out waiting for Get [linearizable: %v, opt: %+v]", linearizable, opt)
 	case <-donec:
 	}

View File

@@ -678,7 +678,7 @@ func TestWatchErrConnClosed(t *testing.T) {
 	clus.TakeClient(0)
 	select {
-	case <-time.After(3 * time.Second):
+	case <-time.After(integration.RequestWaitTimeout):
 		t.Fatal("wc.Watch took too long")
 	case <-donec:
 	}
@@ -705,7 +705,7 @@ func TestWatchAfterClose(t *testing.T) {
 		close(donec)
 	}()
 	select {
-	case <-time.After(3 * time.Second):
+	case <-time.After(integration.RequestWaitTimeout):
 		t.Fatal("wc.Watch took too long")
 	case <-donec:
 	}
@@ -751,7 +751,7 @@ func TestWatchWithRequireLeader(t *testing.T) {
 		if resp.Err() != rpctypes.ErrNoLeader {
 			t.Fatalf("expected %v watch response error, got %+v", rpctypes.ErrNoLeader, resp)
 		}
-	case <-time.After(3 * time.Second):
+	case <-time.After(integration.RequestWaitTimeout):
 		t.Fatal("watch without leader took too long to close")
 	}
@@ -760,7 +760,7 @@ func TestWatchWithRequireLeader(t *testing.T) {
 		if ok {
 			t.Fatalf("expected closed channel, got response %v", resp)
 		}
-	case <-time.After(3 * time.Second):
+	case <-time.After(integration.RequestWaitTimeout):
 		t.Fatal("waited too long for channel to close")
 	}

View File

@@ -58,10 +58,12 @@ import (
 )

 const (
-	tickDuration   = 10 * time.Millisecond
-	clusterName    = "etcd"
-	requestTimeout = 20 * time.Second
+	// RequestWaitTimeout is the time duration to wait for a request to go through or detect leader loss.
+	RequestWaitTimeout = 3 * time.Second
+	tickDuration       = 10 * time.Millisecond
+	requestTimeout     = 20 * time.Second
+	clusterName        = "etcd"

 	basePort     = 21000
 	UrlScheme    = "unix"
 	UrlSchemeTLS = "unixs"
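Centralizing the wait in one exported constant keeps the 3-second budget consistent across the clientv3 test suites and lets it be tuned in a single place. Note that RequestWaitTimeout is distinct from the unexported requestTimeout (20s) in the same const block: judging from the doc comment above, the former bounds a test's wait for a request to go through or for leader loss to be detected, while the latter remains the package's general request timeout.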