Compare commits

..

1 Commits

Author SHA1 Message Date
dependabot[bot] 0974b2e1cb
build(deps): bump github.com/prometheus/client_golang
Bumps [github.com/prometheus/client_golang](https://github.com/prometheus/client_golang) from 1.15.1 to 1.16.0.
- [Release notes](https://github.com/prometheus/client_golang/releases)
- [Changelog](https://github.com/prometheus/client_golang/blob/main/CHANGELOG.md)
- [Commits](https://github.com/prometheus/client_golang/compare/v1.15.1...v1.16.0)

---
updated-dependencies:
- dependency-name: github.com/prometheus/client_golang
  dependency-type: indirect
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-06-19 09:52:19 +00:00
19 changed files with 98 additions and 286 deletions

View File

@ -40,7 +40,7 @@ jobs:
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@6c089f53dd51dc3fc7e599c3cb5356453a52ca9e # v2.20.0
uses: github/codeql-action/init@83f0fe6c4988d98a455712a27f0255212bba9bd4 # v2.3.6
with:
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
@ -50,6 +50,6 @@ jobs:
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@6c089f53dd51dc3fc7e599c3cb5356453a52ca9e # v2.20.0
uses: github/codeql-action/autobuild@83f0fe6c4988d98a455712a27f0255212bba9bd4 # v2.3.6
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@6c089f53dd51dc3fc7e599c3cb5356453a52ca9e # v2.20.0
uses: github/codeql-action/analyze@83f0fe6c4988d98a455712a27f0255212bba9bd4 # v2.3.6

View File

@ -50,6 +50,6 @@ jobs:
# Upload the results to GitHub's code scanning dashboard.
- name: "Upload to code-scanning"
uses: github/codeql-action/upload-sarif@6c089f53dd51dc3fc7e599c3cb5356453a52ca9e # tag=v1.0.26
uses: github/codeql-action/upload-sarif@83f0fe6c4988d98a455712a27f0255212bba9bd4 # tag=v1.0.26
with:
sarif_file: results.sarif

View File

@ -8,7 +8,6 @@ Previous change logs can be found at [CHANGELOG-3.3](https://github.com/etcd-io/
### etcd server
- Fix [corruption check may get a `ErrCompacted` error when server has just been compacted](https://github.com/etcd-io/etcd/pull/16047)
- Improve [Lease put performance for the case that auth is disabled or the user is admin](https://github.com/etcd-io/etcd/pull/16020)
## v3.4.26 (2023-05-12)

View File

@ -8,7 +8,6 @@ Previous change logs can be found at [CHANGELOG-3.4](https://github.com/etcd-io/
### etcd server
- Fix [corruption check may get a `ErrCompacted` error when server has just been compacted](https://github.com/etcd-io/etcd/pull/16048)
- Improve [Lease put performance for the case that auth is disabled or the user is admin](https://github.com/etcd-io/etcd/pull/16019)
### etcd grpc-proxy
- Fix [Memberlist results not updated when proxy node down](https://github.com/etcd-io/etcd/pull/15907).

4
go.mod
View File

@ -69,10 +69,10 @@ require (
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/olekukonko/tablewriter v0.0.5 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_golang v1.15.1 // indirect
github.com/prometheus/client_golang v1.16.0 // indirect
github.com/prometheus/client_model v0.4.0 // indirect
github.com/prometheus/common v0.43.0 // indirect
github.com/prometheus/procfs v0.9.0 // indirect
github.com/prometheus/procfs v0.10.1 // indirect
github.com/rivo/uniseg v0.2.0 // indirect
github.com/sirupsen/logrus v1.8.1 // indirect
github.com/soheilhy/cmux v0.1.5 // indirect

8
go.sum
View File

@ -229,15 +229,15 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.15.1 h1:8tXpTmJbyH5lydzFPoxSIJ0J46jdh3tylbvM1xCv0LI=
github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk=
github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8=
github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY=
github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
github.com/prometheus/common v0.43.0 h1:iq+BVjvYLei5f27wiuNiB1DN6DYQkp1c8Bx0Vykh5us=
github.com/prometheus/common v0.43.0/go.mod h1:NCvr5cQIh3Y/gy73/RdVtC9r8xxrxwJnB+2lB3BxrFc=
github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg=
github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM=
github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=

View File

@ -39,7 +39,7 @@ func TestRobustness(t *testing.T) {
scenarios := []testScenario{}
for _, traffic := range []traffic.Config{traffic.LowTraffic, traffic.HighTraffic, traffic.KubernetesTraffic} {
scenarios = append(scenarios, testScenario{
name: traffic.Name + "/ClusterOfSize1",
name: traffic.Name + "ClusterOfSize1",
traffic: traffic,
cluster: *e2e.NewConfig(
e2e.WithClusterSize(1),
@ -61,7 +61,7 @@ func TestRobustness(t *testing.T) {
clusterOfSize3Options = append(clusterOfSize3Options, e2e.WithSnapshotCatchUpEntries(100))
}
scenarios = append(scenarios, testScenario{
name: traffic.Name + "/ClusterOfSize3",
name: traffic.Name + "ClusterOfSize3",
traffic: traffic,
cluster: *e2e.NewConfig(clusterOfSize3Options...),
})

View File

@ -47,12 +47,12 @@ var DeterministicModel = porcupine.Model{
return string(data)
},
Step: func(st interface{}, in interface{}, out interface{}) (bool, interface{}) {
var s EtcdState
var s etcdState
err := json.Unmarshal([]byte(st.(string)), &s)
if err != nil {
panic(err)
}
ok, s := s.apply(in.(EtcdRequest), out.(EtcdResponse))
ok, s := s.Step(in.(EtcdRequest), out.(EtcdResponse))
data, err := json.Marshal(s)
if err != nil {
panic(err)
@ -64,20 +64,20 @@ var DeterministicModel = porcupine.Model{
},
}
type EtcdState struct {
type etcdState struct {
Revision int64
KeyValues map[string]ValueRevision
KeyLeases map[string]int64
Leases map[int64]EtcdLease
}
func (s EtcdState) apply(request EtcdRequest, response EtcdResponse) (bool, EtcdState) {
newState, modelResponse := s.Step(request)
func (s etcdState) Step(request EtcdRequest, response EtcdResponse) (bool, etcdState) {
newState, modelResponse := s.step(request)
return Match(MaybeEtcdResponse{EtcdResponse: response}, modelResponse), newState
}
func freshEtcdState() EtcdState {
return EtcdState{
func freshEtcdState() etcdState {
return etcdState{
Revision: 1,
KeyValues: map[string]ValueRevision{},
KeyLeases: map[string]int64{},
@ -85,8 +85,8 @@ func freshEtcdState() EtcdState {
}
}
// Step handles a successful request, returning updated state and response it would generate.
func (s EtcdState) Step(request EtcdRequest) (EtcdState, MaybeEtcdResponse) {
// step handles a successful request, returning updated state and response it would generate.
func (s etcdState) step(request EtcdRequest) (etcdState, MaybeEtcdResponse) {
newKVs := map[string]ValueRevision{}
for k, v := range s.KeyValues {
newKVs[k] = v
@ -185,7 +185,7 @@ func (s EtcdState) Step(request EtcdRequest) (EtcdState, MaybeEtcdResponse) {
}
}
func (s EtcdState) getRange(key string, options RangeOptions) RangeResponse {
func (s etcdState) getRange(key string, options RangeOptions) RangeResponse {
response := RangeResponse{
KVs: []KeyValue{},
}
@ -217,7 +217,7 @@ func (s EtcdState) getRange(key string, options RangeOptions) RangeResponse {
return response
}
func detachFromOldLease(s EtcdState, key string) EtcdState {
func detachFromOldLease(s etcdState, key string) etcdState {
if oldLeaseId, ok := s.KeyLeases[key]; ok {
delete(s.Leases[oldLeaseId].Keys, key)
delete(s.KeyLeases, key)
@ -225,7 +225,7 @@ func detachFromOldLease(s EtcdState, key string) EtcdState {
return s
}
func attachToNewLease(s EtcdState, leaseID int64, key string) EtcdState {
func attachToNewLease(s etcdState, leaseID int64, key string) etcdState {
s.KeyLeases[key] = leaseID
s.Leases[leaseID].Keys[key] = leased
return s

View File

@ -33,12 +33,12 @@ func TestModelDeterministic(t *testing.T) {
if op.expectFailure == ok {
t.Logf("state: %v", state)
t.Errorf("Unexpected operation result, expect: %v, got: %v, operation: %s", !op.expectFailure, ok, DeterministicModel.DescribeOperation(op.req, op.resp.EtcdResponse))
var loadedState EtcdState
var loadedState etcdState
err := json.Unmarshal([]byte(state.(string)), &loadedState)
if err != nil {
t.Fatalf("Failed to load state: %v", err)
}
_, resp := loadedState.Step(op.req)
_, resp := loadedState.step(op.req)
t.Errorf("Response diff: %s", cmp.Diff(op.resp, resp))
break
}

View File

@ -40,7 +40,7 @@ var NonDeterministicModel = porcupine.Model{
if err != nil {
panic(err)
}
ok, states := states.apply(in.(EtcdRequest), out.(MaybeEtcdResponse))
ok, states := states.Step(in.(EtcdRequest), out.(MaybeEtcdResponse))
data, err := json.Marshal(states)
if err != nil {
panic(err)
@ -52,27 +52,27 @@ var NonDeterministicModel = porcupine.Model{
},
}
type nonDeterministicState []EtcdState
type nonDeterministicState []etcdState
func (states nonDeterministicState) apply(request EtcdRequest, response MaybeEtcdResponse) (bool, nonDeterministicState) {
func (states nonDeterministicState) Step(request EtcdRequest, response MaybeEtcdResponse) (bool, nonDeterministicState) {
var newStates nonDeterministicState
switch {
case response.Err != nil:
newStates = states.stepFailedResponse(request)
newStates = states.stepFailedRequest(request)
case response.PartialResponse:
newStates = states.applyResponseRevision(request, response.EtcdResponse.Revision)
newStates = states.stepPartialRequest(request, response.EtcdResponse.Revision)
default:
newStates = states.applySuccessfulResponse(request, response.EtcdResponse)
newStates = states.stepSuccessfulRequest(request, response.EtcdResponse)
}
return len(newStates) > 0, newStates
}
// stepFailedResponse duplicates number of states by considering both cases, request was persisted and request was lost.
func (states nonDeterministicState) stepFailedResponse(request EtcdRequest) nonDeterministicState {
// stepFailedRequest duplicates number of states by considering request persisted and lost.
func (states nonDeterministicState) stepFailedRequest(request EtcdRequest) nonDeterministicState {
newStates := make(nonDeterministicState, 0, len(states)*2)
for _, s := range states {
newStates = append(newStates, s)
newState, _ := s.Step(request)
newState, _ := s.step(request)
if !reflect.DeepEqual(newState, s) {
newStates = append(newStates, newState)
}
@ -80,11 +80,11 @@ func (states nonDeterministicState) stepFailedResponse(request EtcdRequest) nonD
return newStates
}
// applyResponseRevision filters possible states by leaving ony states that would return proper revision.
func (states nonDeterministicState) applyResponseRevision(request EtcdRequest, responseRevision int64) nonDeterministicState {
// stepPartialRequest filters possible states by leaving only states that would return proper revision.
func (states nonDeterministicState) stepPartialRequest(request EtcdRequest, responseRevision int64) nonDeterministicState {
newStates := make(nonDeterministicState, 0, len(states))
for _, s := range states {
newState, modelResponse := s.Step(request)
newState, modelResponse := s.step(request)
if modelResponse.Revision == responseRevision {
newStates = append(newStates, newState)
}
@ -92,11 +92,11 @@ func (states nonDeterministicState) applyResponseRevision(request EtcdRequest, r
return newStates
}
// applySuccessfulResponse filters possible states by leaving ony states that would respond correctly.
func (states nonDeterministicState) applySuccessfulResponse(request EtcdRequest, response EtcdResponse) nonDeterministicState {
// stepSuccessfulRequest filters possible states by leaving only states that would respond correctly.
func (states nonDeterministicState) stepSuccessfulRequest(request EtcdRequest, response EtcdResponse) nonDeterministicState {
newStates := make(nonDeterministicState, 0, len(states))
for _, s := range states {
newState, modelResponse := s.Step(request)
newState, modelResponse := s.step(request)
if Match(modelResponse, MaybeEtcdResponse{EtcdResponse: response}) {
newStates = append(newStates, newState)
}

View File

@ -339,7 +339,7 @@ func TestModelNonDeterministic(t *testing.T) {
t.Fatalf("Failed to load state: %v", err)
}
for i, s := range loadedState {
_, resp := s.Step(op.req)
_, resp := s.step(op.req)
t.Errorf("For state %d, response diff: %s", i, cmp.Diff(op.resp, resp))
}
break

View File

@ -1,99 +0,0 @@
// Copyright 2023 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"fmt"
)
func NewReplay(eventHistory []WatchEvent) *EtcdReplay {
var lastEventRevision int64 = 1
for _, event := range eventHistory {
if event.Revision > lastEventRevision && event.Revision != lastEventRevision+1 {
panic("Replay requires a complete event history")
}
lastEventRevision = event.Revision
}
return &EtcdReplay{
eventHistory: eventHistory,
}
}
type EtcdReplay struct {
eventHistory []WatchEvent
// Cached state and event index used for its calculation
cachedState *EtcdState
eventHistoryIndex int
}
func (r *EtcdReplay) StateForRevision(revision int64) (EtcdState, error) {
if revision < 1 {
return EtcdState{}, fmt.Errorf("invalid revision: %d", revision)
}
if r.cachedState == nil || r.cachedState.Revision > revision {
r.reset()
}
for r.eventHistoryIndex < len(r.eventHistory) && r.cachedState.Revision < revision {
nextRequest, nextRevision, nextIndex := r.next()
newState, _ := r.cachedState.Step(nextRequest)
if newState.Revision != nextRevision {
return EtcdState{}, fmt.Errorf("model returned different revision than one present in event history, model: %d, event: %d", newState.Revision, nextRevision)
}
r.cachedState = &newState
r.eventHistoryIndex = nextIndex
}
if r.eventHistoryIndex > len(r.eventHistory) && r.cachedState.Revision < revision {
return EtcdState{}, fmt.Errorf("requested revision higher then available in even history, requested: %d, model: %d", revision, r.cachedState.Revision)
}
return *r.cachedState, nil
}
func (r *EtcdReplay) reset() {
state := freshEtcdState()
r.cachedState = &state
r.eventHistoryIndex = 0
}
func (r *EtcdReplay) next() (request EtcdRequest, revision int64, index int) {
revision = r.eventHistory[r.eventHistoryIndex].Revision
index = r.eventHistoryIndex
operations := []EtcdOperation{}
for r.eventHistory[index].Revision == revision {
operations = append(operations, r.eventHistory[index].Op)
index++
}
return EtcdRequest{
Type: Txn,
Txn: &TxnRequest{
OperationsOnSuccess: operations,
},
}, revision, index
}
func operationToRequest(op EtcdOperation) EtcdRequest {
return EtcdRequest{
Type: Txn,
Txn: &TxnRequest{
OperationsOnSuccess: []EtcdOperation{op},
},
}
}
type WatchEvent struct {
Op EtcdOperation
Revision int64
}

View File

@ -34,7 +34,7 @@ type report struct {
lg *zap.Logger
clus *e2e.EtcdProcessCluster
clientReports []traffic.ClientReport
visualizeHistory func(path string) error
visualizeHistory func(path string)
}
func testResultsDirectory(t *testing.T) string {
@ -89,10 +89,7 @@ func (r *report) Report(t *testing.T, force bool) {
}
}
if r.visualizeHistory != nil {
err := r.visualizeHistory(filepath.Join(path, "history.html"))
if err != nil {
t.Error(err)
}
r.visualizeHistory(filepath.Join(path, "history.html"))
}
}

View File

@ -46,17 +46,22 @@ type RecordingClient struct {
}
type WatchResponse struct {
Events []model.WatchEvent
Events []WatchEvent
IsProgressNotify bool
Revision int64
Time time.Duration
}
type TimedWatchEvent struct {
model.WatchEvent
WatchEvent
Time time.Duration
}
type WatchEvent struct {
Op model.EtcdOperation
Revision int64
}
func NewClient(endpoints []string, ids identity.Provider, baseTime time.Time) (*RecordingClient, error) {
cc, err := clientv3.New(clientv3.Config{
Endpoints: endpoints,
@ -254,7 +259,7 @@ func ToWatchResponse(r clientv3.WatchResponse, baseTime time.Time) WatchResponse
return resp
}
func toWatchEvent(event clientv3.Event) model.WatchEvent {
func toWatchEvent(event clientv3.Event) WatchEvent {
var op model.OperationType
switch event.Type {
case mvccpb.PUT:
@ -264,7 +269,7 @@ func toWatchEvent(event clientv3.Event) model.WatchEvent {
default:
panic(fmt.Sprintf("Unexpected event type: %s", event.Type))
}
return model.WatchEvent{
return WatchEvent{
Revision: event.Kv.ModRevision,
Op: model.EtcdOperation{
Type: op,

View File

@ -100,13 +100,6 @@ func (t etcdTraffic) Run(ctx context.Context, c *RecordingClient, limiter *rate.
lastOperationSucceeded := true
var lastRev int64
var requestType etcdRequestType
client := etcdTrafficClient{
etcdTraffic: t,
client: c,
limiter: limiter,
idProvider: ids,
leaseStorage: lm,
}
for {
select {
case <-ctx.Done():
@ -122,7 +115,7 @@ func (t etcdTraffic) Run(ctx context.Context, c *RecordingClient, limiter *rate.
} else {
requestType = Get
}
rev, err := client.Request(ctx, requestType, key, lastRev)
rev, err := t.Request(ctx, c, requestType, limiter, key, ids, lm, lastRev)
lastOperationSucceeded = err == nil
if err != nil {
continue
@ -134,95 +127,87 @@ func (t etcdTraffic) Run(ctx context.Context, c *RecordingClient, limiter *rate.
}
}
type etcdTrafficClient struct {
etcdTraffic
client *RecordingClient
limiter *rate.Limiter
idProvider identity.Provider
leaseStorage identity.LeaseIdStorage
}
func (c etcdTrafficClient) Request(ctx context.Context, request etcdRequestType, key string, lastRev int64) (rev int64, err error) {
func (t etcdTraffic) Request(ctx context.Context, c *RecordingClient, request etcdRequestType, limiter *rate.Limiter, key string, id identity.Provider, lm identity.LeaseIdStorage, lastRev int64) (rev int64, err error) {
opCtx, cancel := context.WithTimeout(ctx, RequestTimeout)
switch request {
case StaleGet:
_, rev, err = c.client.Get(opCtx, key, lastRev)
_, rev, err = c.Get(opCtx, key, lastRev)
case Get:
_, rev, err = c.client.Get(opCtx, key, 0)
_, rev, err = c.Get(opCtx, key, 0)
case Put:
var resp *clientv3.PutResponse
resp, err = c.client.Put(opCtx, key, fmt.Sprintf("%d", c.idProvider.NewRequestId()))
resp, err = c.Put(opCtx, key, fmt.Sprintf("%d", id.NewRequestId()))
if resp != nil {
rev = resp.Header.Revision
}
case LargePut:
var resp *clientv3.PutResponse
resp, err = c.client.Put(opCtx, key, randString(c.largePutSize))
resp, err = c.Put(opCtx, key, randString(t.largePutSize))
if resp != nil {
rev = resp.Header.Revision
}
case Delete:
var resp *clientv3.DeleteResponse
resp, err = c.client.Delete(opCtx, key)
resp, err = c.Delete(opCtx, key)
if resp != nil {
rev = resp.Header.Revision
}
case MultiOpTxn:
var resp *clientv3.TxnResponse
resp, err = c.client.Txn(opCtx, nil, c.pickMultiTxnOps(), nil)
resp, err = c.Txn(opCtx, nil, t.pickMultiTxnOps(id), nil)
if resp != nil {
rev = resp.Header.Revision
}
case CompareAndSet:
var kv *mvccpb.KeyValue
kv, rev, err = c.client.Get(opCtx, key, 0)
kv, rev, err = c.Get(opCtx, key, 0)
if err == nil {
c.limiter.Wait(ctx)
limiter.Wait(ctx)
var expectedRevision int64
if kv != nil {
expectedRevision = kv.ModRevision
}
txnCtx, txnCancel := context.WithTimeout(ctx, RequestTimeout)
var resp *clientv3.TxnResponse
resp, err = c.client.Txn(txnCtx, []clientv3.Cmp{clientv3.Compare(clientv3.ModRevision(key), "=", expectedRevision)}, []clientv3.Op{clientv3.OpPut(key, fmt.Sprintf("%d", c.idProvider.NewRequestId()))}, nil)
resp, err = c.Txn(txnCtx, []clientv3.Cmp{clientv3.Compare(clientv3.ModRevision(key), "=", expectedRevision)}, []clientv3.Op{clientv3.OpPut(key, fmt.Sprintf("%d", id.NewRequestId()))}, nil)
txnCancel()
if resp != nil {
rev = resp.Header.Revision
}
}
case PutWithLease:
leaseId := c.leaseStorage.LeaseId(c.client.id)
leaseId := lm.LeaseId(c.id)
if leaseId == 0 {
var resp *clientv3.LeaseGrantResponse
resp, err = c.client.LeaseGrant(opCtx, c.leaseTTL)
resp, err = c.LeaseGrant(opCtx, t.leaseTTL)
if resp != nil {
leaseId = int64(resp.ID)
rev = resp.ResponseHeader.Revision
}
if err == nil {
c.leaseStorage.AddLeaseId(c.client.id, leaseId)
c.limiter.Wait(ctx)
lm.AddLeaseId(c.id, leaseId)
limiter.Wait(ctx)
}
}
if leaseId != 0 {
putCtx, putCancel := context.WithTimeout(ctx, RequestTimeout)
var resp *clientv3.PutResponse
resp, err = c.client.PutWithLease(putCtx, key, fmt.Sprintf("%d", c.idProvider.NewRequestId()), leaseId)
resp, err = c.PutWithLease(putCtx, key, fmt.Sprintf("%d", id.NewRequestId()), leaseId)
putCancel()
if resp != nil {
rev = resp.Header.Revision
}
}
case LeaseRevoke:
leaseId := c.leaseStorage.LeaseId(c.client.id)
leaseId := lm.LeaseId(c.id)
if leaseId != 0 {
var resp *clientv3.LeaseRevokeResponse
resp, err = c.client.LeaseRevoke(opCtx, leaseId)
resp, err = c.LeaseRevoke(opCtx, leaseId)
//if LeaseRevoke has failed, do not remove the mapping.
if err == nil {
c.leaseStorage.RemoveLeaseId(c.client.id)
lm.RemoveLeaseId(c.id)
}
if resp != nil {
rev = resp.Header.Revision
@ -230,7 +215,7 @@ func (c etcdTrafficClient) Request(ctx context.Context, request etcdRequestType,
}
case Defragment:
var resp *clientv3.DefragmentResponse
resp, err = c.client.Defragment(opCtx)
resp, err = c.Defragment(opCtx)
if resp != nil {
rev = resp.Header.Revision
}
@ -241,13 +226,13 @@ func (c etcdTrafficClient) Request(ctx context.Context, request etcdRequestType,
return rev, err
}
func (c etcdTrafficClient) pickMultiTxnOps() (ops []clientv3.Op) {
keys := rand.Perm(c.keyCount)
func (t etcdTraffic) pickMultiTxnOps(ids identity.Provider) (ops []clientv3.Op) {
keys := rand.Perm(t.keyCount)
opTypes := make([]model.OperationType, 4)
atLeastOnePut := false
for i := 0; i < MultiOpTxnOpCount; i++ {
opTypes[i] = c.pickOperationType()
opTypes[i] = t.pickOperationType()
if opTypes[i] == model.PutOperation {
atLeastOnePut = true
}
@ -263,7 +248,7 @@ func (c etcdTrafficClient) pickMultiTxnOps() (ops []clientv3.Op) {
case model.RangeOperation:
ops = append(ops, clientv3.OpGet(key))
case model.PutOperation:
value := fmt.Sprintf("%d", c.idProvider.NewRequestId())
value := fmt.Sprintf("%d", ids.NewRequestId())
ops = append(ops, clientv3.OpPut(key, value))
case model.DeleteOperation:
ops = append(ops, clientv3.OpDelete(key))

View File

@ -37,13 +37,13 @@ var (
maximalQPS: 1000,
clientCount: 12,
Traffic: kubernetesTraffic{
averageKeyCount: 10,
averageKeyCount: 5,
resource: "pods",
namespace: "default",
writeChoices: []choiceWeight[KubernetesRequestType]{
{choice: KubernetesUpdate, weight: 90},
{choice: KubernetesDelete, weight: 5},
{choice: KubernetesCreate, weight: 5},
{choice: KubernetesUpdate, weight: 75},
{choice: KubernetesDelete, weight: 15},
{choice: KubernetesCreate, weight: 10},
},
},
}

View File

@ -15,92 +15,28 @@
package validate
import (
"fmt"
"reflect"
"sort"
"testing"
"time"
"github.com/anishathalye/porcupine"
"github.com/google/go-cmp/cmp"
"go.uber.org/zap"
"go.etcd.io/etcd/tests/v3/robustness/model"
)
func validateOperationsAndVisualize(t *testing.T, lg *zap.Logger, operations []porcupine.Operation, eventHistory []model.WatchEvent) (visualize func(basepath string) error) {
const timeout = 5 * time.Minute
lg.Info("Validating linearizable operations", zap.Duration("timeout", timeout))
result, visualize := validateLinearizableOperationAndVisualize(lg, operations, timeout)
switch result {
case porcupine.Illegal:
t.Error("Linearization failed for provided operations")
return
case porcupine.Unknown:
func validateOperationHistoryAndReturnVisualize(t *testing.T, lg *zap.Logger, operations []porcupine.Operation) (visualize func(basepath string)) {
linearizable, info := porcupine.CheckOperationsVerbose(model.NonDeterministicModel, operations, 5*time.Minute)
if linearizable == porcupine.Illegal {
t.Error("Model is not linearizable")
return
case porcupine.Ok:
t.Log("Linearization passed")
default:
t.Fatalf("Unknown Linearization")
}
lg.Info("Validating serializable operations")
// TODO: Use linearization result instead of event history to get order of events
// This is currently impossible as porcupine doesn't expose operation order created during linearization.
validateSerializableOperations(t, operations, eventHistory)
return visualize
}
func validateLinearizableOperationAndVisualize(lg *zap.Logger, operations []porcupine.Operation, timeout time.Duration) (result porcupine.CheckResult, visualize func(basepath string) error) {
linearizable, info := porcupine.CheckOperationsVerbose(model.NonDeterministicModel, operations, timeout)
return linearizable, func(path string) error {
if linearizable == porcupine.Unknown {
t.Error("Linearization timed out")
}
return func(path string) {
lg.Info("Saving visualization", zap.String("path", path))
err := porcupine.VisualizePath(model.NonDeterministicModel, info, path)
if err != nil {
return fmt.Errorf("failed to visualize, err: %v", err)
}
return nil
}
}
func validateSerializableOperations(t *testing.T, operations []porcupine.Operation, totalEventHistory []model.WatchEvent) {
staleReads := filterSerializableReads(operations)
if len(staleReads) == 0 {
return
}
sort.Slice(staleReads, func(i, j int) bool {
return staleReads[i].Input.(model.EtcdRequest).Range.Revision < staleReads[j].Input.(model.EtcdRequest).Range.Revision
})
replay := model.NewReplay(totalEventHistory)
for _, read := range staleReads {
request := read.Input.(model.EtcdRequest)
response := read.Output.(model.MaybeEtcdResponse)
validateSerializableOperation(t, replay, request, response)
}
}
func filterSerializableReads(operations []porcupine.Operation) []porcupine.Operation {
resp := []porcupine.Operation{}
for _, op := range operations {
request := op.Input.(model.EtcdRequest)
if request.Type == model.Range && request.Range.Revision != 0 {
resp = append(resp, op)
t.Errorf("Failed to visualize, err: %v", err)
}
}
return resp
}
func validateSerializableOperation(t *testing.T, replay *model.EtcdReplay, request model.EtcdRequest, response model.MaybeEtcdResponse) {
if response.PartialResponse || response.Err != nil {
return
}
state, err := replay.StateForRevision(request.Range.Revision)
if err != nil {
t.Fatal(err)
}
_, expectResp := state.Step(request)
if !reflect.DeepEqual(response.EtcdResponse.Range, expectResp.Range) {
t.Errorf("Invalid serializable response, diff: %s", cmp.Diff(response.EtcdResponse.Range, expectResp.Range))
}
}

View File

@ -24,13 +24,14 @@ import (
"go.etcd.io/etcd/tests/v3/robustness/traffic"
)
// ValidateAndReturnVisualize returns visualize as porcupine.linearizationInfo used to generate visualization is private.
func ValidateAndReturnVisualize(t *testing.T, lg *zap.Logger, cfg Config, reports []traffic.ClientReport) (visualize func(basepath string) error) {
eventHistory := validateWatch(t, cfg, reports)
// ValidateAndReturnVisualize returns visualize as porcupine.linearizationInfo used to generate visualization is private.
func ValidateAndReturnVisualize(t *testing.T, lg *zap.Logger, cfg Config, reports []traffic.ClientReport) (visualize func(basepath string)) {
validateWatch(t, cfg, reports)
// TODO: Validate stale reads responses.
allOperations := operations(reports)
watchEvents := uniqueWatchEvents(reports)
patchedOperations := patchOperationsWithWatchEvents(allOperations, watchEvents)
return validateOperationsAndVisualize(t, lg, patchedOperations, eventHistory)
newOperations := patchOperationsWithWatchEvents(allOperations, watchEvents)
return validateOperationHistoryAndReturnVisualize(t, lg, newOperations)
}
func operations(reports []traffic.ClientReport) []porcupine.Operation {

View File

@ -19,11 +19,10 @@ import (
"github.com/google/go-cmp/cmp"
"go.etcd.io/etcd/tests/v3/robustness/model"
"go.etcd.io/etcd/tests/v3/robustness/traffic"
)
func validateWatch(t *testing.T, cfg Config, reports []traffic.ClientReport) []model.WatchEvent {
func validateWatch(t *testing.T, cfg Config, reports []traffic.ClientReport) {
// Validate etcd watch properties defined in https://etcd.io/docs/v3.6/learning/api_guarantees/#watch-apis
for _, r := range reports {
validateOrdered(t, r)
@ -35,10 +34,8 @@ func validateWatch(t *testing.T, cfg Config, reports []traffic.ClientReport) []m
validateEventsMatch(t, reports)
// Expects that longest history encompasses all events.
// TODO: Use combined events from all histories instead of the longest history.
eventHistory := longestEventHistory(reports)
// TODO: Validate that each watch report is reliable, not only the longest one.
validateReliable(t, eventHistory)
return watchEvents(eventHistory)
validateReliable(t, longestEventHistory(reports))
}
func validateBookmarkable(t *testing.T, report traffic.ClientReport) {
@ -130,7 +127,7 @@ func validateEventsMatch(t *testing.T, reports []traffic.ClientReport) {
key string
}
type eventClientId struct {
model.WatchEvent
traffic.WatchEvent
ClientId int
}
revisionKeyToEvent := map[revisionKey]eventClientId{}
@ -161,11 +158,3 @@ func longestEventHistory(report []traffic.ClientReport) []traffic.TimedWatchEven
}
return toWatchEvents(report[longestIndex].Watch)
}
func watchEvents(timed []traffic.TimedWatchEvent) []model.WatchEvent {
result := make([]model.WatchEvent, 0, len(timed))
for _, event := range timed {
result = append(result, event.WatchEvent)
}
return result
}