etcd/integration/cluster_test.go

// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package integration
import (
"fmt"
"io/ioutil"
"log"
"math/rand"
"net"
"net/http"
"net/http/httptest"
"os"
"reflect"
"sort"
"strconv"
"strings"
"sync/atomic"
"testing"
"time"
"github.com/coreos/etcd/client"
"github.com/coreos/etcd/etcdserver"
"github.com/coreos/etcd/etcdserver/etcdhttp"
"github.com/coreos/etcd/pkg/testutil"
"github.com/coreos/etcd/pkg/transport"
"github.com/coreos/etcd/pkg/types"
"github.com/coreos/etcd/rafthttp"
"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context"
)
const (
tickDuration = 10 * time.Millisecond
clusterName = "etcd"
requestTimeout = 20 * time.Second
)
var (
electionTicks = 10
// integration tests use well-known ports to listen for each running member,
// so that a restarted member can listen on the same port again.
nextListenPort int64 = 20000
)
func init() {
// enable microsecond-level log timestamps for integration test debugging
log.SetFlags(log.Ltime | log.Lmicroseconds | log.Lshortfile)
if t := os.Getenv("ETCD_ELECTION_TIMEOUT_TICKS"); t != "" {
if i, err := strconv.ParseInt(t, 10, 64); err == nil {
electionTicks = int(i)
}
}
}
func TestClusterOf1(t *testing.T) { testCluster(t, 1) }
func TestClusterOf3(t *testing.T) { testCluster(t, 3) }
func testCluster(t *testing.T, size int) {
defer afterTest(t)
c := NewCluster(t, size)
c.Launch(t)
defer c.Terminate(t)
clusterMustProgress(t, c.Members)
}
func TestTLSClusterOf3(t *testing.T) {
defer afterTest(t)
c := NewTLSCluster(t, 3)
c.Launch(t)
defer c.Terminate(t)
clusterMustProgress(t, c.Members)
}
func TestClusterOf1UsingDiscovery(t *testing.T) { testClusterUsingDiscovery(t, 1) }
func TestClusterOf3UsingDiscovery(t *testing.T) { testClusterUsingDiscovery(t, 3) }
func testClusterUsingDiscovery(t *testing.T, size int) {
defer afterTest(t)
dc := NewCluster(t, 1)
dc.Launch(t)
defer dc.Terminate(t)
// seed the discovery token space with the expected cluster size
dcc := mustNewHTTPClient(t, dc.URLs())
dkapi := client.NewKeysAPI(dcc)
ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
if _, err := dkapi.Create(ctx, "/_config/size", fmt.Sprintf("%d", size)); err != nil {
t.Fatal(err)
}
cancel()
c := NewClusterByDiscovery(t, size, dc.URL(0)+"/v2/keys")
c.Launch(t)
defer c.Terminate(t)
clusterMustProgress(t, c.Members)
}
func TestTLSClusterOf3UsingDiscovery(t *testing.T) {
defer afterTest(t)
dc := NewCluster(t, 1)
dc.Launch(t)
defer dc.Terminate(t)
// seed the discovery token space with the expected cluster size
dcc := mustNewHTTPClient(t, dc.URLs())
dkapi := client.NewKeysAPI(dcc)
ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
if _, err := dkapi.Create(ctx, "/_config/size", fmt.Sprintf("%d", 3)); err != nil {
t.Fatal(err)
}
cancel()
c := NewTLSClusterByDiscovery(t, 3, dc.URL(0)+"/v2/keys")
c.Launch(t)
defer c.Terminate(t)
clusterMustProgress(t, c.Members)
}
func TestDoubleClusterSizeOf1(t *testing.T) { testDoubleClusterSize(t, 1) }
func TestDoubleClusterSizeOf3(t *testing.T) { testDoubleClusterSize(t, 3) }
func testDoubleClusterSize(t *testing.T, size int) {
defer afterTest(t)
c := NewCluster(t, size)
c.Launch(t)
defer c.Terminate(t)
for i := 0; i < size; i++ {
c.AddMember(t)
}
clusterMustProgress(t, c.Members)
}
func TestDoubleTLSClusterSizeOf3(t *testing.T) {
defer afterTest(t)
c := NewTLSCluster(t, 3)
c.Launch(t)
defer c.Terminate(t)
for i := 0; i < 3; i++ {
c.AddTLSMember(t)
}
clusterMustProgress(t, c.Members)
}
func TestDecreaseClusterSizeOf3(t *testing.T) { testDecreaseClusterSize(t, 3) }
func TestDecreaseClusterSizeOf5(t *testing.T) { testDecreaseClusterSize(t, 5) }
func testDecreaseClusterSize(t *testing.T, size int) {
defer afterTest(t)
c := NewCluster(t, size)
c.Launch(t)
defer c.Terminate(t)
// TODO: remove the last but one member
for i := 0; i < size-1; i++ {
id := c.Members[len(c.Members)-1].s.ID()
c.RemoveMember(t, uint64(id))
c.waitLeader(t, c.Members)
}
clusterMustProgress(t, c.Members)
}
func TestForceNewCluster(t *testing.T) {
c := NewCluster(t, 3)
c.Launch(t)
cc := mustNewHTTPClient(t, []string{c.Members[0].URL()})
kapi := client.NewKeysAPI(cc)
ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
resp, err := kapi.Create(ctx, "/foo", "bar")
if err != nil {
t.Fatalf("unexpected create error: %v", err)
}
cancel()
// ensure the create has been applied on this member
ctx, cancel = context.WithTimeout(context.Background(), requestTimeout)
if _, err = kapi.Watcher("/foo", &client.WatcherOptions{AfterIndex: resp.Node.ModifiedIndex - 1}).Next(ctx); err != nil {
t.Fatalf("unexpected watch error: %v", err)
}
cancel()
c.Members[0].Stop(t)
c.Members[1].Terminate(t)
c.Members[2].Terminate(t)
c.Members[0].ForceNewCluster = true
err = c.Members[0].Restart(t)
if err != nil {
t.Fatalf("unexpected ForceRestart error: %v", err)
}
defer c.Members[0].Terminate(t)
c.waitLeader(t, c.Members[:1])
// use new http client to init new connection
cc = mustNewHTTPClient(t, []string{c.Members[0].URL()})
kapi = client.NewKeysAPI(cc)
// ensure the force restart keeps the old data, and the new cluster can make progress
ctx, cancel = context.WithTimeout(context.Background(), requestTimeout)
if _, err := kapi.Watcher("/foo", &client.WatcherOptions{AfterIndex: resp.Node.ModifiedIndex - 1}).Next(ctx); err != nil {
t.Fatalf("unexpected watch error: %v", err)
}
cancel()
clusterMustProgress(t, c.Members[:1])
}
func TestAddMemberAfterClusterFullRotation(t *testing.T) {
defer afterTest(t)
c := NewCluster(t, 3)
c.Launch(t)
defer c.Terminate(t)
// remove each of the original three members and add a new member in its place.
for i := 0; i < 3; i++ {
c.RemoveMember(t, uint64(c.Members[0].s.ID()))
c.waitLeader(t, c.Members)
c.AddMember(t)
c.waitLeader(t, c.Members)
}
c.AddMember(t)
c.waitLeader(t, c.Members)
clusterMustProgress(t, c.Members)
}
// Ensure we can remove a member then add a new one back immediately.
func TestIssue2681(t *testing.T) {
defer afterTest(t)
c := NewCluster(t, 5)
c.Launch(t)
defer c.Terminate(t)
c.RemoveMember(t, uint64(c.Members[4].s.ID()))
c.waitLeader(t, c.Members)
c.AddMember(t)
c.waitLeader(t, c.Members)
clusterMustProgress(t, c.Members)
}
// Ensure we can remove a member after a snapshot then add a new one back.
func TestIssue2746(t *testing.T) {
defer afterTest(t)
c := NewCluster(t, 5)
for _, m := range c.Members {
m.SnapCount = 10
}
c.Launch(t)
defer c.Terminate(t)
// force a snapshot
for i := 0; i < 20; i++ {
clusterMustProgress(t, c.Members)
}
c.RemoveMember(t, uint64(c.Members[4].s.ID()))
c.waitLeader(t, c.Members)
c.AddMember(t)
c.waitLeader(t, c.Members)
clusterMustProgress(t, c.Members)
}
// Ensure etcd will not panic when removing a just started member.
func TestIssue2904(t *testing.T) {
defer afterTest(t)
// start 1-member cluster to ensure member 0 is the leader of the cluster.
c := NewCluster(t, 1)
c.Launch(t)
defer c.Terminate(t)
c.AddMember(t)
c.Members[1].Stop(t)
// send remove member-1 request to the cluster.
cc := mustNewHTTPClient(t, c.URLs())
ma := client.NewMembersAPI(cc)
ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
// the proposal is not committed because member 1 is stopped, but the
// proposal is appended to the leader's raft log.
ma.Remove(ctx, c.Members[1].s.ID().String())
cancel()
// restart the member, and expect it to send an updateAttr request.
// the log in the leader is like this:
// [..., remove 1, ..., update attr 1, ...]
c.Members[1].Restart(t)
// when the member comes back, it acks the proposal to remove itself,
// and applies it.
<-c.Members[1].s.StopNotify()
// terminate removed member
c.Members[1].Terminate(t)
c.Members = c.Members[:1]
// wait for the member to be removed.
c.waitMembersMatch(t, c.HTTPMembers())
}
// clusterMustProgress ensures that the cluster can make progress. It creates
// a random key, then checks that the new key can be fetched from every client
// URL of the cluster.
func clusterMustProgress(t *testing.T, membs []*member) {
cc := mustNewHTTPClient(t, []string{membs[0].URL()})
kapi := client.NewKeysAPI(cc)
ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
key := fmt.Sprintf("foo%d", rand.Int())
resp, err := kapi.Create(ctx, "/"+key, "bar")
if err != nil {
t.Fatalf("create on %s error: %v", membs[0].URL(), err)
}
cancel()
for i, m := range membs {
u := m.URL()
mcc := mustNewHTTPClient(t, []string{u})
mkapi := client.NewKeysAPI(mcc)
mctx, mcancel := context.WithTimeout(context.Background(), requestTimeout)
if _, err := mkapi.Watcher(key, &client.WatcherOptions{AfterIndex: resp.Node.ModifiedIndex - 1}).Next(mctx); err != nil {
t.Fatalf("#%d: watch on %s error: %v", i, u, err)
}
mcancel()
}
}
// TODO: support TLS
type cluster struct {
Members []*member
}
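// fillClusterForMembers builds the initial-cluster string from the peer
// listeners of all given members and assigns the resulting URLs map to
// every member's InitialPeerURLsMap.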
func fillClusterForMembers(ms []*member) error {
addrs := make([]string, 0)
for _, m := range ms {
scheme := "http"
if !m.PeerTLSInfo.Empty() {
scheme = "https"
}
for _, l := range m.PeerListeners {
addrs = append(addrs, fmt.Sprintf("%s=%s://%s", m.Name, scheme, l.Addr().String()))
}
}
clusterStr := strings.Join(addrs, ",")
var err error
for _, m := range ms {
m.InitialPeerURLsMap, err = types.NewURLsMap(clusterStr)
if err != nil {
return err
}
}
return nil
}
func newCluster(t *testing.T, size int, usePeerTLS bool) *cluster {
c := &cluster{}
ms := make([]*member, size)
for i := 0; i < size; i++ {
ms[i] = mustNewMember(t, c.name(i), usePeerTLS)
}
c.Members = ms
if err := fillClusterForMembers(c.Members); err != nil {
t.Fatal(err)
}
return c
}
func newClusterByDiscovery(t *testing.T, size int, usePeerTLS bool, url string) *cluster {
c := &cluster{}
ms := make([]*member, size)
for i := 0; i < size; i++ {
ms[i] = mustNewMember(t, c.name(i), usePeerTLS)
ms[i].DiscoveryURL = url
}
c.Members = ms
return c
}
// NewCluster returns an unlaunched cluster of the given size which has been
// set to use static bootstrap.
func NewCluster(t *testing.T, size int) *cluster {
return newCluster(t, size, false)
}
// NewClusterByDiscovery returns an unlaunched cluster of the given size
// which has been set to use the given url as its discovery service to bootstrap.
func NewClusterByDiscovery(t *testing.T, size int, url string) *cluster {
return newClusterByDiscovery(t, size, false, url)
}
func NewTLSCluster(t *testing.T, size int) *cluster {
return newCluster(t, size, true)
}
func NewTLSClusterByDiscovery(t *testing.T, size int, url string) *cluster {
return newClusterByDiscovery(t, size, true, url)
}
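// Launch starts every member of the cluster concurrently and waits for the
// cluster to become stable before returning.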
func (c *cluster) Launch(t *testing.T) {
errc := make(chan error)
for _, m := range c.Members {
// Members are launched in separate goroutines because, if they boot
// using a discovery url, they have to wait for the others to register before continuing.
go func(m *member) {
errc <- m.Launch()
}(m)
}
for range c.Members {
if err := <-errc; err != nil {
t.Fatalf("error setting up member: %v", err)
}
}
// wait for the cluster to be stable before accepting future client requests
c.waitMembersMatch(t, c.HTTPMembers())
c.waitVersion()
}
func (c *cluster) URL(i int) string {
return c.Members[i].ClientURLs[0].String()
}
func (c *cluster) URLs() []string {
urls := make([]string, 0)
for _, m := range c.Members {
for _, u := range m.ClientURLs {
urls = append(urls, u.String())
}
}
return urls
}
func (c *cluster) HTTPMembers() []client.Member {
ms := make([]client.Member, len(c.Members))
for i, m := range c.Members {
scheme := "http"
if !m.PeerTLSInfo.Empty() {
scheme = "https"
}
ms[i].Name = m.Name
for _, ln := range m.PeerListeners {
ms[i].PeerURLs = append(ms[i].PeerURLs, scheme+"://"+ln.Addr().String())
}
for _, ln := range m.ClientListeners {
ms[i].ClientURLs = append(ms[i].ClientURLs, "http://"+ln.Addr().String())
}
}
return ms
}
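// addMember creates a new member, registers it through the members API,
// launches it, and waits for the cluster to stabilize.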
func (c *cluster) addMember(t *testing.T, usePeerTLS bool) {
m := mustNewMember(t, c.name(rand.Int()), usePeerTLS)
scheme := "http"
if usePeerTLS {
scheme = "https"
}
// send add request to the cluster
cc := mustNewHTTPClient(t, []string{c.URL(0)})
ma := client.NewMembersAPI(cc)
ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
peerURL := scheme + "://" + m.PeerListeners[0].Addr().String()
if _, err := ma.Add(ctx, peerURL); err != nil {
t.Fatalf("add member on %s error: %v", c.URL(0), err)
}
cancel()
// wait for the add-node entry to be applied in the cluster
members := append(c.HTTPMembers(), client.Member{PeerURLs: []string{peerURL}, ClientURLs: []string{}})
c.waitMembersMatch(t, members)
m.InitialPeerURLsMap = types.URLsMap{}
for _, mm := range c.Members {
m.InitialPeerURLsMap[mm.Name] = mm.PeerURLs
}
m.InitialPeerURLsMap[m.Name] = m.PeerURLs
m.NewCluster = false
if err := m.Launch(); err != nil {
t.Fatal(err)
}
c.Members = append(c.Members, m)
// wait for the cluster to be stable before accepting future client requests
c.waitMembersMatch(t, c.HTTPMembers())
}
func (c *cluster) AddMember(t *testing.T) {
c.addMember(t, false)
}
func (c *cluster) AddTLSMember(t *testing.T) {
c.addMember(t, true)
}
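// RemoveMember removes the member with the given id through the members API,
// waits for the removed member to stop, and waits for the remaining members
// to agree on the new membership.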
func (c *cluster) RemoveMember(t *testing.T, id uint64) {
// send remove request to the cluster
cc := mustNewHTTPClient(t, c.URLs())
ma := client.NewMembersAPI(cc)
ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
if err := ma.Remove(ctx, types.ID(id).String()); err != nil {
t.Fatalf("unexpected remove error %v", err)
}
cancel()
newMembers := make([]*member, 0)
for _, m := range c.Members {
if uint64(m.s.ID()) != id {
newMembers = append(newMembers, m)
} else {
select {
case <-m.s.StopNotify():
m.Terminate(t)
// 1s stop delay + election timeout + 1s disk and network delay + connection write timeout
// TODO: remove connection write timeout by selecting on http response closeNotifier
// blocking on https://github.com/golang/go/issues/9524
case <-time.After(time.Second + time.Duration(electionTicks)*tickDuration + time.Second + rafthttp.ConnWriteTimeout):
t.Fatalf("failed to remove member %s in time", m.s.ID())
}
}
}
c.Members = newMembers
c.waitMembersMatch(t, c.HTTPMembers())
}
func (c *cluster) Terminate(t *testing.T) {
for _, m := range c.Members {
m.Terminate(t)
}
}
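// waitMembersMatch polls every client URL until each one reports a member
// list equal to membs.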
func (c *cluster) waitMembersMatch(t *testing.T, membs []client.Member) {
for _, u := range c.URLs() {
cc := mustNewHTTPClient(t, []string{u})
ma := client.NewMembersAPI(cc)
for {
ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
ms, err := ma.List(ctx)
cancel()
if err == nil && isMembersEqual(ms, membs) {
break
}
time.Sleep(tickDuration)
}
}
}
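// waitLeader blocks until all given members agree on the same single leader.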
func (c *cluster) waitLeader(t *testing.T, membs []*member) {
possibleLead := make(map[uint64]bool)
var lead uint64
for _, m := range membs {
possibleLead[uint64(m.s.ID())] = true
}
for lead == 0 || !possibleLead[lead] {
lead = 0
for _, m := range membs {
if lead != 0 && lead != m.s.Lead() {
lead = 0
break
}
lead = m.s.Lead()
}
time.Sleep(10 * tickDuration)
}
}
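// waitVersion blocks until every member has decided on a cluster version.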
func (c *cluster) waitVersion() {
for _, m := range c.Members {
for {
if m.s.ClusterVersion() != nil {
break
}
time.Sleep(tickDuration)
}
}
}
func (c *cluster) name(i int) string {
return fmt.Sprint("node", i)
}
// isMembersEqual checks whether the two member slices are equal, ignoring the
// ID field. The given wmembs should always have the ID field set to the empty string.
func isMembersEqual(membs []client.Member, wmembs []client.Member) bool {
sort.Sort(SortableMemberSliceByPeerURLs(membs))
sort.Sort(SortableMemberSliceByPeerURLs(wmembs))
for i := range membs {
membs[i].ID = ""
}
return reflect.DeepEqual(membs, wmembs)
}
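// newLocalListener opens a listener on the next well-known port (see
// nextListenPort) on the loopback interface.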
func newLocalListener(t *testing.T) net.Listener {
port := atomic.AddInt64(&nextListenPort, 1)
l, err := net.Listen("tcp", "127.0.0.1:"+strconv.FormatInt(port, 10))
if err != nil {
t.Fatal(err)
}
return l
}
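// newListenerWithAddr listens on the given address, retrying for a few
// seconds because the previous owner of the port may not have released it yet.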
func newListenerWithAddr(t *testing.T, addr string) net.Listener {
var err error
var l net.Listener
// TODO: we want to reuse a previously closed port immediately.
// a better way is to set SO_REUSExx options instead of retrying.
for i := 0; i < 5; i++ {
l, err = net.Listen("tcp", addr)
if err == nil {
break
}
time.Sleep(500 * time.Millisecond)
}
if err != nil {
t.Fatal(err)
}
return l
}
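// member encapsulates a single etcd server together with the HTTP test
// servers that front its peer and client listeners.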
type member struct {
etcdserver.ServerConfig
PeerListeners, ClientListeners []net.Listener
// a non-empty PeerTLSInfo enables peer TLS
PeerTLSInfo transport.TLSInfo
raftHandler *testutil.PauseableHandler
s *etcdserver.EtcdServer
hss []*httptest.Server
}
// mustNewMember returns an initialized member with the given name. If usePeerTLS
// is true, it sets PeerTLSInfo and uses the https scheme to communicate between
// peers.
func mustNewMember(t *testing.T, name string, usePeerTLS bool) *member {
var (
testTLSInfo = transport.TLSInfo{
KeyFile: "./fixtures/server.key.insecure",
CertFile: "./fixtures/server.crt",
TrustedCAFile: "./fixtures/ca.crt",
ClientCertAuth: true,
}
err error
)
m := &member{}
peerScheme := "http"
if usePeerTLS {
peerScheme = "https"
}
pln := newLocalListener(t)
m.PeerListeners = []net.Listener{pln}
m.PeerURLs, err = types.NewURLs([]string{peerScheme + "://" + pln.Addr().String()})
if err != nil {
t.Fatal(err)
}
if usePeerTLS {
m.PeerTLSInfo = testTLSInfo
}
cln := newLocalListener(t)
m.ClientListeners = []net.Listener{cln}
m.ClientURLs, err = types.NewURLs([]string{"http://" + cln.Addr().String()})
if err != nil {
t.Fatal(err)
}
m.Name = name
m.DataDir, err = ioutil.TempDir(os.TempDir(), "etcd")
if err != nil {
t.Fatal(err)
}
clusterStr := fmt.Sprintf("%s=%s://%s", name, peerScheme, pln.Addr().String())
m.InitialPeerURLsMap, err = types.NewURLsMap(clusterStr)
if err != nil {
t.Fatal(err)
}
m.InitialClusterToken = clusterName
m.NewCluster = true
m.ServerConfig.PeerTLSInfo = m.PeerTLSInfo
m.ElectionTicks = electionTicks
m.TickMs = uint(tickDuration / time.Millisecond)
return m
}
// Clone returns a member with the same server configuration. The returned
// member does not set PeerListeners or ClientListeners.
func (m *member) Clone(t *testing.T) *member {
mm := &member{}
mm.ServerConfig = m.ServerConfig
var err error
clientURLStrs := m.ClientURLs.StringSlice()
mm.ClientURLs, err = types.NewURLs(clientURLStrs)
if err != nil {
// this should never fail
panic(err)
}
peerURLStrs := m.PeerURLs.StringSlice()
mm.PeerURLs, err = types.NewURLs(peerURLStrs)
if err != nil {
// this should never fail
panic(err)
}
clusterStr := m.InitialPeerURLsMap.String()
mm.InitialPeerURLsMap, err = types.NewURLsMap(clusterStr)
if err != nil {
// this should never fail
panic(err)
}
mm.InitialClusterToken = m.InitialClusterToken
mm.ElectionTicks = m.ElectionTicks
mm.PeerTLSInfo = m.PeerTLSInfo
return mm
}
// Launch starts a member based on ServerConfig, PeerListeners
// and ClientListeners.
func (m *member) Launch() error {
var err error
if m.s, err = etcdserver.NewServer(&m.ServerConfig); err != nil {
return fmt.Errorf("failed to initialize the etcd server: %v", err)
}
m.s.SyncTicker = time.Tick(500 * time.Millisecond)
m.s.Start()
m.raftHandler = &testutil.PauseableHandler{Next: etcdhttp.NewPeerHandler(m.s.Cluster(), m.s.RaftHandler())}
for _, ln := range m.PeerListeners {
hs := &httptest.Server{
Listener: ln,
Config: &http.Server{Handler: m.raftHandler},
}
if m.PeerTLSInfo.Empty() {
hs.Start()
} else {
hs.TLS, err = m.PeerTLSInfo.ServerConfig()
if err != nil {
return err
}
hs.StartTLS()
}
m.hss = append(m.hss, hs)
}
for _, ln := range m.ClientListeners {
hs := &httptest.Server{
Listener: ln,
Config: &http.Server{Handler: etcdhttp.NewClientHandler(m.s, m.ServerConfig.ReqTimeout())},
}
hs.Start()
m.hss = append(m.hss, hs)
}
return nil
}
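// WaitOK blocks until the member answers client requests and has seen a leader.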
func (m *member) WaitOK(t *testing.T) {
cc := mustNewHTTPClient(t, []string{m.URL()})
kapi := client.NewKeysAPI(cc)
for {
ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
_, err := kapi.Get(ctx, "/", nil)
if err != nil {
time.Sleep(tickDuration)
continue
}
cancel()
break
}
for m.s.Leader() == 0 {
time.Sleep(tickDuration)
}
}
func (m *member) URL() string { return m.ClientURLs[0].String() }
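// Pause pauses the member: the raft handler stops serving peer requests and
// the server stops sending raft messages.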
func (m *member) Pause() {
m.raftHandler.Pause()
m.s.PauseSending()
}
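// Resume reverses Pause and restores the raft message flow.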
func (m *member) Resume() {
m.raftHandler.Resume()
m.s.ResumeSending()
}
// Stop stops the member, but the data dir of the member is preserved.
func (m *member) Stop(t *testing.T) {
m.s.Stop()
for _, hs := range m.hss {
hs.CloseClientConnections()
hs.Close()
}
m.hss = nil
}
// Restart starts the member again, reusing the preserved data dir.
func (m *member) Restart(t *testing.T) error {
newPeerListeners := make([]net.Listener, 0)
for _, ln := range m.PeerListeners {
newPeerListeners = append(newPeerListeners, newListenerWithAddr(t, ln.Addr().String()))
}
m.PeerListeners = newPeerListeners
newClientListeners := make([]net.Listener, 0)
for _, ln := range m.ClientListeners {
newClientListeners = append(newClientListeners, newListenerWithAddr(t, ln.Addr().String()))
}
m.ClientListeners = newClientListeners
return m.Launch()
}
// Terminate stops the member and removes the data dir.
func (m *member) Terminate(t *testing.T) {
m.s.Stop()
for _, hs := range m.hss {
hs.CloseClientConnections()
hs.Close()
}
if err := os.RemoveAll(m.ServerConfig.DataDir); err != nil {
t.Fatal(err)
}
}
func mustNewHTTPClient(t *testing.T, eps []string) client.Client {
cfg := client.Config{Transport: mustNewTransport(t, transport.TLSInfo{}), Endpoints: eps}
c, err := client.New(cfg)
if err != nil {
t.Fatal(err)
}
return c
}
func mustNewTransport(t *testing.T, tlsInfo transport.TLSInfo) *http.Transport {
// ticks in the integration tests are short, so a 1s dial timeout works well.
tr, err := transport.NewTimeoutTransport(tlsInfo, time.Second, rafthttp.ConnReadTimeout, rafthttp.ConnWriteTimeout)
if err != nil {
t.Fatal(err)
}
return tr
}
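// SortableMemberSliceByPeerURLs sorts members by their first peer URL.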
type SortableMemberSliceByPeerURLs []client.Member
func (p SortableMemberSliceByPeerURLs) Len() int { return len(p) }
func (p SortableMemberSliceByPeerURLs) Less(i, j int) bool {
return p[i].PeerURLs[0] < p[j].PeerURLs[0]
}
func (p SortableMemberSliceByPeerURLs) Swap(i, j int) { p[i], p[j] = p[j], p[i] }