Skip to content

Commit

Permalink
Merge branch 'kubernetes:master' into master
Browse files Browse the repository at this point in the history
  • Loading branch information
hakuna-matatah authored Jul 1, 2023
2 parents 900eaf6 + 4c08d58 commit 5eff2d7
Show file tree
Hide file tree
Showing 2,476 changed files with 358,083 additions and 60,850 deletions.
2 changes: 1 addition & 1 deletion .golangci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,6 @@ linters:
# - misspell

service:
golangci-lint-version: 1.23.x # use the fixed version to not introduce new linters unexpectedly
golangci-lint-version: 1.51.x # use the fixed version to not introduce new linters unexpectedly
prepare:
- echo "here I can run custom commands, but no preparation needed for this repo"
1 change: 1 addition & 0 deletions clusterloader2/cmd/clusterloader.go
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,7 @@ import (
_ "k8s.io/perf-tests/clusterloader2/pkg/measurement/common/bundle"
_ "k8s.io/perf-tests/clusterloader2/pkg/measurement/common/dns"
_ "k8s.io/perf-tests/clusterloader2/pkg/measurement/common/network"
_ "k8s.io/perf-tests/clusterloader2/pkg/measurement/common/network-policy"
_ "k8s.io/perf-tests/clusterloader2/pkg/measurement/common/probes"
_ "k8s.io/perf-tests/clusterloader2/pkg/measurement/common/slos"
)
Expand Down
2 changes: 1 addition & 1 deletion clusterloader2/docs/GETTING_STARTED.md
Original file line number Diff line number Diff line change
Expand Up @@ -228,7 +228,7 @@ spec:
group: test-pod
spec:
containers:
- image: registry.k8s.io/pause:3.1
- image: registry.k8s.io/pause:3.9
name: {{.Name}}
```
## Execute test
Expand Down
8 changes: 4 additions & 4 deletions clusterloader2/examples/generic_query_example.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -24,16 +24,16 @@ steps:
unit: ms
queries:
- name: Perc99
query: histogram_quantile(0.99, sum(rate(apiserver_request_duration_seconds_bucket[%v])) by (le))
query: histogram_quantile(0.99, sum(rate(apiserver_request_sli_duration_seconds_bucket[%v])) by (le))
threshold: 60
- name: Perc90
query: histogram_quantile(0.9, sum(rate(apiserver_request_duration_seconds_bucket[%v])) by (le))
query: histogram_quantile(0.9, sum(rate(apiserver_request_sli_duration_seconds_bucket[%v])) by (le))
- name: Perc50
query: histogram_quantile(0.5, sum(rate(apiserver_request_duration_seconds_bucket[%v])) by (le))
query: histogram_quantile(0.5, sum(rate(apiserver_request_sli_duration_seconds_bucket[%v])) by (le))
threshold: 5
requireSamples: true
- name: non-existent
query: histogram_quantile(0.5, sum(rate(fake_apiserver_request_duration_seconds_bucket[%v])) by (le))
query: histogram_quantile(0.5, sum(rate(fake_apiserver_request_sli_duration_seconds_bucket[%v])) by (le))
threshold: 42
- name: Sleep
measurements:
Expand Down
11 changes: 11 additions & 0 deletions clusterloader2/pkg/framework/framework.go
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,7 @@ import (
"k8s.io/perf-tests/clusterloader2/pkg/framework/client"
frconfig "k8s.io/perf-tests/clusterloader2/pkg/framework/config"

"k8s.io/client-go/discovery"
restclient "k8s.io/client-go/rest"

// ensure auth plugins are loaded
Expand All @@ -51,6 +52,7 @@ type Framework struct {
dynamicClients *MultiDynamicClient
clusterConfig *config.ClusterConfig
restClientConfig *restclient.Config
discoveryClient *discovery.DiscoveryClient
}

// NewFramework creates new framework based on given clusterConfig.
Expand Down Expand Up @@ -85,6 +87,11 @@ func newFramework(clusterConfig *config.ClusterConfig, clientsNumber int, kubeCo
if f.restClientConfig, err = frconfig.GetConfig(kubeConfigPath); err != nil {
return nil, fmt.Errorf("rest client creation error: %v", err)
}

if f.discoveryClient, err = discovery.NewDiscoveryClientForConfig(f.restClientConfig); err != nil {
return nil, fmt.Errorf("discovery client creation error: %v", err)
}

return &f, nil
}

Expand Down Expand Up @@ -117,6 +124,10 @@ func (f *Framework) GetClusterConfig() *config.ClusterConfig {
return f.clusterConfig
}

// GetDiscoveryClient returns the discovery client created for the
// framework's rest client config in newFramework.
func (f *Framework) GetDiscoveryClient() *discovery.DiscoveryClient {
	return f.discoveryClient
}

// CreateAutomanagedNamespaces creates automanaged namespaces.
func (f *Framework) CreateAutomanagedNamespaces(namespaceCount int, allowExistingNamespaces bool, deleteAutomanagedNamespaces bool) error {
f.mux.Lock()
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -26,14 +26,15 @@ import (
"k8s.io/klog/v2"
"k8s.io/perf-tests/clusterloader2/pkg/errors"
"k8s.io/perf-tests/clusterloader2/pkg/measurement"
measurementutil "k8s.io/perf-tests/clusterloader2/pkg/measurement/util"
"k8s.io/perf-tests/clusterloader2/pkg/util"
)

const (
cepPropagationDelayMeasurementName = "CiliumEndpointPropagationDelay"
// The metric definition and bucket sizes for cilium_endpoint_propagation_delay_seconds:
// https://github.com/cilium/cilium/blob/v1.11/pkg/metrics/metrics.go#L1263
cepPropagationDelayQuery = `sum(cilium_endpoint_propagation_delay_seconds_bucket) by (le)`
cepPropagationDelayQuery = `sum(sum_over_time(cilium_endpoint_propagation_delay_seconds_bucket[%v])) by (le)`
queryInterval = 10 * time.Minute

// bucketAllEntries is the default Prometheus bucket that
Expand Down Expand Up @@ -82,15 +83,26 @@ func (c *cepPropagationDelayGatherer) gatherCepPropagationDelay(executor QueryEx
// Query the data between start and end time on fixed intervals
// to get accurate data from multiple snapshots.
var samples []*model.Sample
queryTime := startTime.Add(queryInterval)
for queryTime.Before(endTime) {
newSamples, err := executor.Query(cepPropagationDelayQuery, queryTime)
prevQueryTime := startTime
currQueryTime := startTime.Add(queryInterval)
for {
if currQueryTime.After(endTime) {
currQueryTime = endTime
}
queryDuration := currQueryTime.Sub(prevQueryTime)
promDuration := measurementutil.ToPrometheusTime(queryDuration)
query := fmt.Sprintf(cepPropagationDelayQuery, promDuration)
newSamples, err := executor.Query(query, currQueryTime)
if err == nil {
samples = append(samples, newSamples...)
} else {
klog.V(2).Infof("Got error querying Prometheus: %v", err)
}
queryTime = queryTime.Add(queryInterval)
if currQueryTime == endTime {
break
}
prevQueryTime = currQueryTime
currQueryTime = currQueryTime.Add(queryInterval)
}

extractSampleData := func(sample *model.Sample) (string, string, int) {
Expand Down
4 changes: 2 additions & 2 deletions clusterloader2/pkg/measurement/common/executors/utils.go
Original file line number Diff line number Diff line change
Expand Up @@ -36,8 +36,8 @@ type group struct {
Rules []rule `yaml:"rules"`
}

//prometheusRuleManifest mimics the structure of PrometheusRule object used by prometheus operator
//https://github.com/prometheus-operator/prometheus-operator/blob/main/pkg/apis/monitoring/v1/types.go#L1393
// prometheusRuleManifest mimics the structure of PrometheusRule object used by prometheus operator
// https://github.com/prometheus-operator/prometheus-operator/blob/main/pkg/apis/monitoring/v1/types.go#L1393
type prometheusRuleManifest struct {
Spec struct {
Groups []group `yaml:"groups"`
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@ import (
const (
metricsServerPrometheusMeasurementName = "MetricsServerPrometheus"

metricsServerLatencyQuery = `histogram_quantile(%v, sum(rate(apiserver_request_duration_seconds_bucket{group="metrics.k8s.io",resource="pods",scope="cluster"}[%v])) by (le))`
metricsServerLatencyQuery = `histogram_quantile(%v, sum(rate(%v_bucket{group="metrics.k8s.io",resource="pods",scope="cluster"}[%v])) by (le))`
)

var (
Expand All @@ -47,7 +47,7 @@ func init() {
type metricsServerGatherer struct{}

func (g *metricsServerGatherer) Gather(executor QueryExecutor, startTime, endTime time.Time, config *measurement.Config) ([]measurement.Summary, error) {
latencyMetrics, err := g.gatherLatencyMetrics(executor, startTime, endTime)
latencyMetrics, err := g.gatherLatencyMetrics(executor, startTime, endTime, config)
if err != nil {
return nil, err
}
Expand All @@ -71,16 +71,17 @@ func (g *metricsServerGatherer) String() string {
return metricsServerPrometheusMeasurementName
}

func (g *metricsServerGatherer) gatherLatencyMetrics(executor QueryExecutor, startTime, endTime time.Time) (*measurementutil.LatencyMetric, error) {
func (g *metricsServerGatherer) gatherLatencyMetrics(executor QueryExecutor, startTime, endTime time.Time, config *measurement.Config) (*measurementutil.LatencyMetric, error) {
measurementDuration := endTime.Sub(startTime)
promDuration := measurementutil.ToPrometheusTime(measurementDuration)
apiserverSLI := measurementutil.GetApiserverSLI(config.ClusterVersion)

errList := errors.NewErrorList()
result := &measurementutil.LatencyMetric{}

for _, percentile := range desiredMsPercentiles {

query := fmt.Sprintf(metricsServerLatencyQuery, percentile, promDuration)
query := fmt.Sprintf(metricsServerLatencyQuery, percentile, apiserverSLI, promDuration)
samples, err := executor.Query(query, endTime)
if err != nil {
errList.Append(fmt.Errorf("failed to execute query %q, err - %v", query, err))
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
# ClusterRole for the network-policy test client: read access to pods
# cluster-wide and read-only access to NetworkPolicy objects.
# {{.Name}} is filled in by the test's template engine.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: {{.Name}}
rules:
# Pods are listed/watched by the client (presumably to measure pod
# reachability latency — confirm against the measurement code).
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["get", "list", "watch"]
- apiGroups: ["networking.k8s.io"]
  resources: ["networkpolicies"]
  verbs: ["get"]
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
# ClusterRoleBinding granting the {{.Name}} ClusterRole to the {{.Name}}
# ServiceAccount in {{.Namespace}}. Name/namespace are template parameters.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: {{.Name}}
subjects:
- kind: ServiceAccount
  name: {{.Name}}
  namespace: {{.Namespace}}
roleRef:
  kind: ClusterRole
  name: {{.Name}}
  apiGroup: rbac.authorization.k8s.io
Original file line number Diff line number Diff line change
@@ -0,0 +1,52 @@
# Single-replica Deployment running the pod-creation-reachability-latency
# test client. All {{.Xxx}} values are template parameters supplied by the
# network-policy measurement.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{.Name}}
  namespace: {{.Namespace}}
  labels:
    test: {{.TestClientLabel}}
    type: {{.TypeLabelValue}}
spec:
  replicas: 1
  selector:
    matchLabels:
      name: {{.Name}}
  template:
    metadata:
      labels:
        name: {{.Name}}
        test: {{.TestClientLabel}}
        type: {{.TypeLabelValue}}
    spec:
      # Use separate nodes to avoid consuming CPU/Memory resources on default
      # nodes where all deployments of the performance test run.
      nodeSelector:
        {{.TestClientNodeSelectorKey}}: {{.TestClientNodeSelectorValue}}
      # Matching toleration so the pod can land on the (tainted) test-client
      # nodes selected above.
      tolerations:
      - key: {{.TestClientNodeSelectorKey}}
        operator: Equal
        value: {{.TestClientNodeSelectorValue}}
        effect: NoSchedule
      containers:
      - name: net-policy-latency-client
        # Port on which the client exposes its metrics.
        ports:
        - containerPort: {{.MetricsPort}}
          name: npdelaymetrics
          protocol: TCP
        imagePullPolicy: Always
        image: gcr.io/k8s-staging-perf-tests/network-policy-enforcement-latency/pod-creation-reachability-latency:v0.0.1
        # Flags configure which target pods/namespace/port the client probes
        # and where it serves metrics.
        command:
        - sh
        - -c
        - ./pod-creation-reachability-latency
          -HostNamespace="{{.Namespace}}"
          -TargetLabelSelector="{{.TargetLabelSelector}}"
          -TargetNamespace="{{.TargetNamespace}}"
          -TargetPort={{.TargetPort}}
          -MaxTargets={{.MaxTargets}}
          -MetricsPort={{.MetricsPort}}
        resources:
          requests:
            cpu: 200m
            memory: 100Mi
      # ServiceAccount bound to the test ClusterRole (pods + networkpolicies
      # read access).
      serviceAccountName: {{.ServiceAccountName}}
Original file line number Diff line number Diff line change
@@ -0,0 +1,53 @@
# Single-replica Deployment running the policy-creation-enforcement-latency
# test client. Same shape as the pod-creation client Deployment, but runs a
# different binary and additionally passes -AllowPolicyName.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{.Name}}
  namespace: {{.Namespace}}
  labels:
    test: {{.TestClientLabel}}
    type: {{.TypeLabelValue}}
spec:
  replicas: 1
  selector:
    matchLabels:
      name: {{.Name}}
  template:
    metadata:
      labels:
        name: {{.Name}}
        test: {{.TestClientLabel}}
        type: {{.TypeLabelValue}}
    spec:
      # Use separate nodes to avoid consuming CPU/Memory resources on default
      # nodes where all deployments of the performance test run.
      nodeSelector:
        {{.TestClientNodeSelectorKey}}: {{.TestClientNodeSelectorValue}}
      # Matching toleration so the pod can land on the (tainted) test-client
      # nodes selected above.
      tolerations:
      - key: {{.TestClientNodeSelectorKey}}
        operator: Equal
        value: {{.TestClientNodeSelectorValue}}
        effect: NoSchedule
      containers:
      - name: net-policy-latency-client
        # Port on which the client exposes its metrics.
        ports:
        - containerPort: {{.MetricsPort}}
          name: npdelaymetrics
          protocol: TCP
        imagePullPolicy: Always
        image: gcr.io/k8s-staging-perf-tests/network-policy-enforcement-latency/policy-creation-enforcement-latency:v0.0.1
        # -AllowPolicyName identifies the NetworkPolicy whose enforcement
        # latency is measured; other flags mirror the pod-creation client.
        command:
        - sh
        - -c
        - ./policy-creation-enforcement-latency
          -HostNamespace="{{.Namespace}}"
          -TargetLabelSelector="{{.TargetLabelSelector}}"
          -TargetNamespace="{{.TargetNamespace}}"
          -TargetPort={{.TargetPort}}
          -MaxTargets={{.MaxTargets}}
          -MetricsPort={{.MetricsPort}}
          -AllowPolicyName={{.AllowPolicyName}}
        resources:
          requests:
            cpu: 200m
            memory: 100Mi
      # ServiceAccount bound to the test ClusterRole (pods + networkpolicies
      # read access).
      serviceAccountName: {{.ServiceAccountName}}
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
# NetworkPolicy allowing egress from the test-client pods (selected by the
# "test" label) to the Kubernetes API server IP on ports 443 and 80.
# NOTE(review): because policyTypes includes Egress, selected pods become
# egress-isolated — any traffic not matched here (e.g. DNS) is blocked
# unless another policy allows it; confirm that is intended.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: {{.Name}}
  namespace: {{.Namespace}}
  labels:
    test: {{.TestClientLabel}}
spec:
  podSelector:
    matchLabels:
      test: {{.TestClientLabel}}
  policyTypes:
  - Egress
  egress:
  - ports:
    - port: 443
      protocol: TCP
    - port: 80
      protocol: TCP
    to:
    - ipBlock:
        # Single-host CIDR for the API server address (template-provided).
        cidr: {{.kubeAPIServerIP}}/32
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
# NetworkPolicy allowing egress from pods with the "type" label to target
# pods selected by {{.TargetLabelKey}}={{.TargetLabelValue}}.
# When OnlyTargetNamespace is set, the peer is restricted to
# {{.TargetNamespace}}; otherwise target pods in any namespace match
# (namespaceSelector: {}).
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: {{.Name}}
  namespace: {{.Namespace}}
  labels:
    type: {{.TypeLabelValue}}
spec:
  podSelector:
    matchLabels:
      type: {{.TypeLabelValue}}
  policyTypes:
  - Egress
  egress:
  - to:
    - podSelector:
        matchLabels:
          {{.TargetLabelKey}}: {{.TargetLabelValue}}
      {{if .OnlyTargetNamespace}}
      namespaceSelector:
        matchLabels:
          kubernetes.io/metadata.name: {{.TargetNamespace}}
      {{else}}
      namespaceSelector: {}
      {{end}}
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
# "Load" NetworkPolicy: allows egress from pods matching the template-
# provided pod selector label to the given CIDR on two fixed TCP ports.
# Created in bulk to generate network-policy rule load on the cluster.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: {{.Name}}
  namespace: {{.Namespace}}
  labels:
    group: load
spec:
  podSelector:
    matchLabels:
      {{.PodSelectorLabelKey}}: {{.PodSelectorLabelValue}}
  policyTypes:
  - Egress
  egress:
  - to:
    - ipBlock:
        cidr: {{.CIDR}}
    ports:
    # Use two ports to double the number of load network policy rules.
    - protocol: TCP
      port: 8080
    - protocol: TCP
      port: 6355
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
# ServiceAccount for the test client pods; bound to the test ClusterRole via
# the matching ClusterRoleBinding. Name/namespace are template parameters.
kind: ServiceAccount
apiVersion: v1
metadata:
  name: {{.Name}}
  namespace: {{.Namespace}}
Loading

0 comments on commit 5eff2d7

Please sign in to comment.