integration: prevent goroutine leaks in tests (#11318)

Some goroutines started by test functions are leaked in certain cases,
for example when a test fails early and the channel a goroutine is
sending on is never drained. This patch stops these leaks no matter how
the test exits, either by putting the blocking channel send in a select
together with a new stopc channel that is closed when the test returns,
or by adding a buffer of 1 to the blocking channel.
release-3.5
lzhfromustc 2019-12-05 18:40:10 -05:00 committed by Xiang Li
parent fd2dddb39f
commit 1f8764be3b
5 changed files with 35 additions and 7 deletions
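
The patch relies on two standard Go patterns for keeping a sending goroutine from blocking forever after the test has returned. The sketch below is not code from this repository; the function and channel names (leaky, fixedWithStop, fixedWithBuffer, resultc) are invented purely to illustrate the select-with-stop-channel fix and the buffered-channel fix in isolation.

package main

import "fmt"

// leaky shows the bug: if the caller returns without receiving,
// the goroutine blocks on the send forever and is leaked.
// (Illustrative only; not code from the etcd repository.)
func leaky() {
	resultc := make(chan int)
	go func() {
		resultc <- 42 // blocks forever if nobody receives
	}()
	// imagine an early return here, e.g. after a failed assertion
}

// fixedWithStop mirrors the select+stopc pattern used in the diff:
// the deferred close(stopc) unblocks the sender even when the
// result is never read.
func fixedWithStop() {
	resultc := make(chan int)
	stopc := make(chan struct{})
	defer close(stopc)
	go func() {
		select {
		case resultc <- 42:
		case <-stopc: // fires once the caller returns
		}
	}()
}

// fixedWithBuffer mirrors the buffered-channel pattern used in the
// diff: a buffer of 1 lets the single send complete without a receiver.
func fixedWithBuffer() {
	resultc := make(chan int, 1)
	go func() {
		resultc <- 42 // never blocks: the buffer absorbs the value
	}()
}

func main() {
	fixedWithStop()
	fixedWithBuffer()
	fmt.Println("senders can always complete or bail out")
}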

@@ -47,13 +47,20 @@ func testBarrier(t *testing.T, waiters int, chooseClient func() *clientv3.Client
 	}
 	donec := make(chan struct{})
+	stopc := make(chan struct{})
+	defer close(stopc)
 	for i := 0; i < waiters; i++ {
 		go func() {
 			br := recipe.NewBarrier(chooseClient(), "test-barrier")
 			if err := br.Wait(); err != nil {
 				t.Errorf("could not wait on barrier (%v)", err)
 			}
-			donec <- struct{}{}
+			select {
+			case donec <- struct{}{}:
+			case <-stopc:
+			}
 		}()
 	}

@@ -39,10 +39,16 @@ func testMoveLeader(t *testing.T, auto bool) {
 	// ensure followers go through leader transition while learship transfer
 	idc := make(chan uint64)
+	stopc := make(chan struct{})
+	defer close(stopc)
 	for i := range clus.Members {
 		if oldLeadIdx != i {
 			go func(m *member) {
-				idc <- checkLeaderTransition(m, oldLeadID)
+				select {
+				case idc <- checkLeaderTransition(m, oldLeadID):
+				case <-stopc:
+				}
 			}(clus.Members[i])
 		}
 	}

@@ -49,6 +49,9 @@ func TestMutexLockMultiNode(t *testing.T) {
 func testMutexLock(t *testing.T, waiters int, chooseClient func() *clientv3.Client) {
 	// stream lock acquisitions
 	lockedC := make(chan *concurrency.Mutex)
+	stopC := make(chan struct{})
+	defer close(stopC)
 	for i := 0; i < waiters; i++ {
 		go func() {
 			session, err := concurrency.NewSession(chooseClient())
@@ -59,7 +62,11 @@ func testMutexLock(t *testing.T, waiters int, chooseClient func() *clientv3.Clie
 			if err := m.Lock(context.TODO()); err != nil {
 				t.Errorf("could not wait on lock (%v)", err)
 			}
-			lockedC <- m
+			select {
+			case lockedC <- m:
+			case <-stopC:
+			}
 		}()
 	}
 	// unlock locked mutexes
@@ -103,6 +110,8 @@ func TestMutexTryLockMultiNode(t *testing.T) {
 func testMutexTryLock(t *testing.T, lockers int, chooseClient func() *clientv3.Client) {
 	lockedC := make(chan *concurrency.Mutex)
 	notlockedC := make(chan *concurrency.Mutex)
+	stopC := make(chan struct{})
+	defer close(stopC)
 	for i := 0; i < lockers; i++ {
 		go func() {
 			session, err := concurrency.NewSession(chooseClient())
@@ -112,9 +121,15 @@ func testMutexTryLock(t *testing.T, lockers int, chooseClient func() *clientv3.C
 			m := concurrency.NewMutex(session, "test-mutex-try-lock")
 			err = m.TryLock(context.TODO())
 			if err == nil {
-				lockedC <- m
+				select {
+				case lockedC <- m:
+				case <-stopC:
+				}
 			} else if err == concurrency.ErrLocked {
-				notlockedC <- m
+				select {
+				case notlockedC <- m:
+				case <-stopC:
+				}
 			} else {
 				t.Errorf("Unexpected Error %v", err)
 			}

@@ -1188,7 +1188,7 @@ func TestV3WatchWithPrevKV(t *testing.T) {
 		t.Fatal(err)
 	}
-	recv := make(chan *pb.WatchResponse)
+	recv := make(chan *pb.WatchResponse, 1)
 	go func() {
 		// check received PUT
 		resp, rerr := ws.Recv()

@@ -97,7 +97,7 @@ func TestV3ElectionObserve(t *testing.T) {
 	lc := toGRPC(clus.Client(0)).Election
 	// observe leadership events
-	observec := make(chan struct{})
+	observec := make(chan struct{}, 1)
 	go func() {
 		defer close(observec)
 		s, err := lc.Observe(context.Background(), &epb.LeaderRequest{Name: []byte("foo")})