Merge pull request #577 from unihorn/6

chore: rename 'heartbeat timeout' to 'heartbeat interval'
release-0.4
Yicheng Qin 2014-02-18 17:11:31 -08:00
commit 46d817f91b
6 changed files with 63 additions and 61 deletions

@@ -1,46 +1,46 @@
## Tuning
The default settings in etcd should work well for installations on a local network where the average network latency is low.
However, when using etcd across multiple data centers or over networks with high latency you may need to tweak the heartbeat and election timeout settings.
However, when using etcd across multiple data centers or over networks with high latency you may need to tweak the heartbeat interval and election timeout settings.
### Timeouts
### Time Parameters
The underlying distributed consensus protocol relies on two separate timeouts to ensure that nodes can handoff leadership if one stalls or goes offline.
The first timeout is called the *Heartbeat Timeout*.
The underlying distributed consensus protocol relies on two separate time parameters to ensure that nodes can hand off leadership if one stalls or goes offline.
The first parameter is called the *Heartbeat Interval*.
This is the frequency with which the leader will notify followers that it is still the leader.
etcd batches commands together for higher throughput so this heartbeat timeout is also a delay for how long it takes for commands to be committed.
By default, etcd uses a `50ms` heartbeat timeout.
etcd batches commands together for higher throughput so this heartbeat interval is also a delay for how long it takes for commands to be committed.
By default, etcd uses a `50ms` heartbeat interval.
The second timeout is the *Election Timeout*.
The second parameter is the *Election Timeout*.
This timeout is how long a follower node will go without hearing a heartbeat before attempting to become leader itself.
By default, etcd uses a `200ms` election timeout.
Adjusting these values is a trade-off.
Lowering the heartbeat timeout will cause individual commands to be committed faster but it will lower the overall throughput of etcd.
If your etcd instances have low utilization then lowering the heartbeat timeout can improve your command response time.
Lowering the heartbeat interval will cause individual commands to be committed faster but it will lower the overall throughput of etcd.
If your etcd instances have low utilization then lowering the heartbeat interval can improve your command response time.
The election timeout should be set based on the heartbeat timeout and your network ping time between nodes.
The election timeout should be set based on the heartbeat interval and your network ping time between nodes.
Election timeouts should be at least 10 times your ping time so they can account for variance in your network.
For example, if the ping time between your nodes is 10ms then you should have at least a 100ms election timeout.
You should also set your election timeout to at least 4 to 5 times your heartbeat timeout to account for variance in leader replication.
For a heartbeat timeout of 50ms you should set your election timeout to at least 200ms - 250ms.
You should also set your election timeout to at least 4 to 5 times your heartbeat interval to account for variance in leader replication.
For a heartbeat interval of 50ms you should set your election timeout to at least 200ms - 250ms.
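For concreteness, here is a minimal Go sketch of that rule of thumb; the `suggestElectionTimeout` helper is purely illustrative and not part of etcd:
```go
package main

import "fmt"

// suggestElectionTimeout applies the sizing guidance above: the election
// timeout should be at least 10x the node-to-node ping time and at least
// 5x the heartbeat interval. All values are in milliseconds.
func suggestElectionTimeout(pingMs, heartbeatIntervalMs int) int {
	byPing := 10 * pingMs
	byHeartbeat := 5 * heartbeatIntervalMs
	if byPing > byHeartbeat {
		return byPing
	}
	return byHeartbeat
}

func main() {
	// A 10ms ping with the default 50ms heartbeat interval suggests a 250ms election timeout.
	fmt.Println(suggestElectionTimeout(10, 50))
}
```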
You can override the default values on the command line:
```sh
# Command line arguments:
$ etcd -peer-heartbeat-timeout=100 -peer-election-timeout=500
$ etcd -peer-heartbeat-interval=100 -peer-election-timeout=500
# Environment variables:
$ ETCD_PEER_HEARTBEAT_TIMEOUT=100 ETCD_PEER_ELECTION_TIMEOUT=500 etcd
$ ETCD_PEER_HEARTBEAT_INTERVAL=100 ETCD_PEER_ELECTION_TIMEOUT=500 etcd
```
Or you can set the values within the configuration file:
```toml
[peer]
heartbeat_timeout = 100
heartbeat_interval = 100
election_timeout = 100
```
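The millisecond settings above feed directly into the timeout math in `etcd.go` (shown later in this diff). A minimal sketch of that derivation, using the example values from the command line above:
```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Values as they would arrive from the flags/config above.
	heartbeatIntervalMs := 100 // -peer-heartbeat-interval=100
	electionTimeoutMs := 500   // -peer-election-timeout=500

	heartbeatInterval := time.Duration(heartbeatIntervalMs) * time.Millisecond
	electionTimeout := time.Duration(electionTimeoutMs) * time.Millisecond

	// etcd.go derives its peer transport timeouts from these two values.
	dialTimeout := (3 * heartbeatInterval) + electionTimeout
	responseHeaderTimeout := (3 * heartbeatInterval) + electionTimeout

	fmt.Println(dialTimeout, responseHeaderTimeout) // 800ms 800ms
}
```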

@@ -24,24 +24,25 @@ const DefaultSystemConfigPath = "/etc/etcd/etcd.conf"
// A lookup of deprecated flags to their new flag name.
var newFlagNameLookup = map[string]string{
"C": "peers",
"CF": "peers-file",
"n": "name",
"c": "addr",
"cl": "bind-addr",
"s": "peer-addr",
"sl": "peer-bind-addr",
"d": "data-dir",
"m": "max-result-buffer",
"r": "max-retry-attempts",
"maxsize": "max-cluster-size",
"clientCAFile": "ca-file",
"clientCert": "cert-file",
"clientKey": "key-file",
"serverCAFile": "peer-ca-file",
"serverCert": "peer-cert-file",
"serverKey": "peer-key-file",
"snapshotCount": "snapshot-count",
"C": "peers",
"CF": "peers-file",
"n": "name",
"c": "addr",
"cl": "bind-addr",
"s": "peer-addr",
"sl": "peer-bind-addr",
"d": "data-dir",
"m": "max-result-buffer",
"r": "max-retry-attempts",
"maxsize": "max-cluster-size",
"clientCAFile": "ca-file",
"clientCert": "cert-file",
"clientKey": "key-file",
"serverCAFile": "peer-ca-file",
"serverCert": "peer-cert-file",
"serverKey": "peer-key-file",
"snapshotCount": "snapshot-count",
"peer-heartbeat-timeout": "peer-heartbeat-interval",
}
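To make the deprecation path concrete, here is a small self-contained sketch (not etcd's actual code; the `translateFlagName` helper is hypothetical) of how a lookup like `newFlagNameLookup` can map an old flag name to its replacement:
```go
package main

import "fmt"

// A trimmed-down copy of the lookup above, containing only the entry added
// by this commit.
var newFlagNameLookup = map[string]string{
	"peer-heartbeat-timeout": "peer-heartbeat-interval",
}

// translateFlagName is a hypothetical helper: it returns the current name
// for a possibly deprecated flag and reports whether a translation happened.
func translateFlagName(name string) (string, bool) {
	if newName, ok := newFlagNameLookup[name]; ok {
		return newName, true
	}
	return name, false
}

func main() {
	name, deprecated := translateFlagName("peer-heartbeat-timeout")
	if deprecated {
		fmt.Printf("flag -peer-heartbeat-timeout is deprecated, use -%s instead\n", name)
	}
}
```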
// Config represents the server configuration.
@@ -73,13 +74,13 @@ type Config struct {
VeryVerbose bool `toml:"very_verbose" env:"ETCD_VERY_VERBOSE"`
VeryVeryVerbose bool `toml:"very_very_verbose" env:"ETCD_VERY_VERY_VERBOSE"`
Peer struct {
Addr string `toml:"addr" env:"ETCD_PEER_ADDR"`
BindAddr string `toml:"bind_addr" env:"ETCD_PEER_BIND_ADDR"`
CAFile string `toml:"ca_file" env:"ETCD_PEER_CA_FILE"`
CertFile string `toml:"cert_file" env:"ETCD_PEER_CERT_FILE"`
KeyFile string `toml:"key_file" env:"ETCD_PEER_KEY_FILE"`
HeartbeatTimeout int `toml:"heartbeat_timeout" env:"ETCD_PEER_HEARTBEAT_TIMEOUT"`
ElectionTimeout int `toml:"election_timeout" env:"ETCD_PEER_ELECTION_TIMEOUT"`
Addr string `toml:"addr" env:"ETCD_PEER_ADDR"`
BindAddr string `toml:"bind_addr" env:"ETCD_PEER_BIND_ADDR"`
CAFile string `toml:"ca_file" env:"ETCD_PEER_CA_FILE"`
CertFile string `toml:"cert_file" env:"ETCD_PEER_CERT_FILE"`
KeyFile string `toml:"key_file" env:"ETCD_PEER_KEY_FILE"`
HeartbeatInterval int `toml:"heartbeat_interval" env:"ETCD_PEER_HEARTBEAT_INTERVAL"`
ElectionTimeout int `toml:"election_timeout" env:"ETCD_PEER_ELECTION_TIMEOUT"`
}
strTrace string `toml:"trace" env:"ETCD_TRACE"`
GraphiteHost string `toml:"graphite_host" env:"ETCD_GRAPHITE_HOST"`
@@ -97,7 +98,7 @@ func New() *Config {
c.Snapshot = true
c.SnapshotCount = 10000
c.Peer.Addr = "127.0.0.1:7001"
c.Peer.HeartbeatTimeout = defaultHeartbeatTimeout
c.Peer.HeartbeatInterval = defaultHeartbeatInterval
c.Peer.ElectionTimeout = defaultElectionTimeout
return c
}
@@ -248,7 +249,7 @@ func (c *Config) LoadFlags(arguments []string) error {
f.IntVar(&c.MaxRetryAttempts, "max-retry-attempts", c.MaxRetryAttempts, "")
f.Float64Var(&c.RetryInterval, "retry-interval", c.RetryInterval, "")
f.IntVar(&c.MaxClusterSize, "max-cluster-size", c.MaxClusterSize, "")
f.IntVar(&c.Peer.HeartbeatTimeout, "peer-heartbeat-timeout", c.Peer.HeartbeatTimeout, "")
f.IntVar(&c.Peer.HeartbeatInterval, "peer-heartbeat-interval", c.Peer.HeartbeatInterval, "")
f.IntVar(&c.Peer.ElectionTimeout, "peer-election-timeout", c.Peer.ElectionTimeout, "")
f.StringVar(&cors, "cors", "", "")
@@ -283,6 +284,7 @@ func (c *Config) LoadFlags(arguments []string) error {
f.IntVar(&c.MaxRetryAttempts, "r", c.MaxRetryAttempts, "(deprecated)")
f.IntVar(&c.MaxClusterSize, "maxsize", c.MaxClusterSize, "(deprecated)")
f.IntVar(&c.SnapshotCount, "snapshotCount", c.SnapshotCount, "(deprecated)")
f.IntVar(&c.Peer.HeartbeatInterval, "peer-heartbeat-timeout", c.Peer.HeartbeatInterval, "(deprecated)")
// END DEPRECATED FLAGS
if err := f.Parse(arguments); err != nil {
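The deprecated flag added above stays usable because both names are registered against the same field. A self-contained sketch of that pattern with the standard `flag` package (the flag-set name and values here are illustrative):
```go
package main

import (
	"flag"
	"fmt"
)

func main() {
	var heartbeatInterval int

	// Register the new flag and the deprecated alias against the same variable,
	// mirroring the two IntVar calls in config.go above.
	f := flag.NewFlagSet("etcd", flag.ExitOnError)
	f.IntVar(&heartbeatInterval, "peer-heartbeat-interval", 50, "")
	f.IntVar(&heartbeatInterval, "peer-heartbeat-timeout", 50, "(deprecated)")

	// An old command line still works and sets the same field.
	if err := f.Parse([]string{"-peer-heartbeat-timeout=100"}); err != nil {
		panic(err)
	}
	fmt.Println(heartbeatInterval) // 100
}
```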
@@ -390,18 +392,18 @@ func (c *Config) Sanitize() error {
// EtcdTLSInfo retrieves a TLSInfo object for the etcd server
func (c *Config) EtcdTLSInfo() server.TLSInfo {
return server.TLSInfo{
CAFile: c.CAFile,
CertFile: c.CertFile,
KeyFile: c.KeyFile,
CAFile: c.CAFile,
CertFile: c.CertFile,
KeyFile: c.KeyFile,
}
}
// PeerTLSInfo retrieves a TLSInfo object for the peer server.
func (c *Config) PeerTLSInfo() server.TLSInfo {
return server.TLSInfo{
CAFile: c.Peer.CAFile,
CertFile: c.Peer.CertFile,
KeyFile: c.Peer.KeyFile,
CAFile: c.Peer.CAFile,
CertFile: c.Peer.CertFile,
KeyFile: c.Peer.KeyFile,
}
}

@@ -5,5 +5,5 @@ const (
defaultElectionTimeout = 200
// The frequency (in ms) by which heartbeats are sent to followers.
defaultHeartbeatTimeout = 50
defaultHeartbeatInterval = 50
)

etcd.go

@@ -109,10 +109,10 @@ func main() {
serverStats := server.NewRaftServerStats(config.Name)
// Calculate all of our timeouts
heartbeatTimeout := time.Duration(config.Peer.HeartbeatTimeout) * time.Millisecond
heartbeatInterval := time.Duration(config.Peer.HeartbeatInterval) * time.Millisecond
electionTimeout := time.Duration(config.Peer.ElectionTimeout) * time.Millisecond
dialTimeout := (3 * heartbeatTimeout) + electionTimeout
responseHeaderTimeout := (3 * heartbeatTimeout) + electionTimeout
dialTimeout := (3 * heartbeatInterval) + electionTimeout
responseHeaderTimeout := (3 * heartbeatInterval) + electionTimeout
// Create peer server
psConfig := server.PeerServerConfig{
@@ -145,7 +145,7 @@ func main() {
}
// Create raft transporter and server
raftTransporter := server.NewTransporter(followersStats, serverStats, registry, heartbeatTimeout, dialTimeout, responseHeaderTimeout)
raftTransporter := server.NewTransporter(followersStats, serverStats, registry, heartbeatInterval, dialTimeout, responseHeaderTimeout)
if psConfig.Scheme == "https" {
raftClientTLSConfig, err := config.PeerTLSInfo().ClientConfig()
if err != nil {
@@ -158,7 +158,7 @@ func main() {
log.Fatal(err)
}
raftServer.SetElectionTimeout(electionTimeout)
raftServer.SetHeartbeatInterval(heartbeatTimeout)
raftServer.SetHeartbeatInterval(heartbeatInterval)
ps.SetRaftServer(raftServer)
// Create etcd server

@@ -44,8 +44,8 @@ Peer Communication Options:
-peer-ca-file=<path> Path to the peer CA file.
-peer-cert-file=<path> Path to the peer cert file.
-peer-key-file=<path> Path to the peer key file.
-peer-heartbeat-timeout=<time>
Time (in milliseconds) for a heartbeat to timeout.
-peer-heartbeat-interval=<time>
Time (in milliseconds) between heartbeats.
-peer-election-timeout=<time>
Time (in milliseconds) for an election to timeout.

@@ -19,7 +19,7 @@ const (
testClientURL = "localhost:4401"
testRaftURL = "localhost:7701"
testSnapshotCount = 10000
testHeartbeatTimeout = time.Duration(50) * time.Millisecond
testHeartbeatInterval = time.Duration(50) * time.Millisecond
testElectionTimeout = time.Duration(200) * time.Millisecond
)
@@ -51,15 +51,15 @@ func RunServer(f func(*server.Server)) {
}
// Create Raft transporter and server
dialTimeout := (3 * testHeartbeatTimeout) + testElectionTimeout
responseHeaderTimeout := (3 * testHeartbeatTimeout) + testElectionTimeout
raftTransporter := server.NewTransporter(followersStats, serverStats, registry, testHeartbeatTimeout, dialTimeout, responseHeaderTimeout)
dialTimeout := (3 * testHeartbeatInterval) + testElectionTimeout
responseHeaderTimeout := (3 * testHeartbeatInterval) + testElectionTimeout
raftTransporter := server.NewTransporter(followersStats, serverStats, registry, testHeartbeatInterval, dialTimeout, responseHeaderTimeout)
raftServer, err := raft.NewServer(testName, path, raftTransporter, store, ps, "")
if err != nil {
panic(err)
}
raftServer.SetElectionTimeout(testElectionTimeout)
raftServer.SetHeartbeatInterval(testHeartbeatTimeout)
raftServer.SetHeartbeatInterval(testHeartbeatInterval)
ps.SetRaftServer(raftServer)
s := server.New(testName, "http://"+testClientURL, ps, registry, store, nil)