Merge pull request #117196 from pohly/scheduler-perf-labels

scheduler_perf: support test case selection via labels
Kubernetes Prow Robot authored on 2023-04-26 14:26:14 -07:00; committed by GitHub
commit dd62a53e1a
5 changed files with 175 additions and 0 deletions

@@ -36,6 +36,17 @@ make test-integration WHAT=./test/integration/scheduler_perf ETCD_LOGLEVEL=warn
```
The benchmark suite runs all the tests specified under config/performance-config.yaml.
By default, it runs all workloads that have the "performance" label. In the configuration,
labels can be added to a test case and/or to individual workloads; each workload also
inherits all labels of its test case. The `perf-scheduling-label-filter` command-line flag
selects which workloads run. It works like GitHub label filtering: the flag accepts a
comma-separated list of label names, each optionally prefixed with `+` or `-`. A label
with no prefix or a `+` prefix must be set on a workload for it to run; a `-` prefix
means the label must not be set. For example, this runs all performance benchmarks
except those labeled as "fast":
```shell
make test-integration WHAT=./test/integration/scheduler_perf ETCD_LOGLEVEL=warn KUBE_TEST_VMODULE="''" KUBE_TEST_ARGS="-run=^$$ -benchtime=1ns -bench=BenchmarkPerfScheduling -perf-scheduling-label-filter=performance,-fast"
```
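Restricting a run to only the fast performance benchmarks works the same way, by requiring
the label instead of excluding it:
```shell
make test-integration WHAT=./test/integration/scheduler_perf ETCD_LOGLEVEL=warn KUBE_TEST_VMODULE="''" KUBE_TEST_ARGS="-run=^$$ -benchtime=1ns -bench=BenchmarkPerfScheduling -perf-scheduling-label-filter=performance,fast"
```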
Once the benchmark has finished, a JSON file with metrics is available in the current directory (test/integration/scheduler_perf). Look for `BenchmarkPerfScheduling_YYYY-MM-DDTHH:MM:SSZ.json`.
You can use `-data-items-dir` to generate the metrics file elsewhere.
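For example, to write the metrics file into `/tmp` instead (a sketch following the flag's
description above; any writable directory should work):
```shell
make test-integration WHAT=./test/integration/scheduler_perf ETCD_LOGLEVEL=warn KUBE_TEST_VMODULE="''" KUBE_TEST_ARGS="-run=^$$ -benchtime=1ns -bench=BenchmarkPerfScheduling -data-items-dir=/tmp"
```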

@@ -1,4 +1,5 @@
- name: SchedulingBasic
labels: [performance]
defaultPodTemplatePath: config/pod-default.yaml
workloadTemplate:
- opcode: createNodes
@@ -10,6 +11,7 @@
collectMetrics: true
workloads:
- name: 500Nodes
labels: [fast]
params:
initNodes: 500
initPods: 500
@@ -21,6 +23,7 @@
measurePods: 1000
- name: SchedulingPodAntiAffinity
labels: [performance]
defaultPodTemplatePath: config/pod-with-pod-anti-affinity.yaml
workloadTemplate:
- opcode: createNodes
@@ -37,6 +40,7 @@
namespace: sched-1
workloads:
- name: 500Nodes
labels: [fast]
params:
initNodes: 500
initPods: 100
@@ -48,6 +52,7 @@
measurePods: 1000
- name: SchedulingSecrets
labels: [performance]
defaultPodTemplatePath: config/pod-with-secret-volume.yaml
workloadTemplate:
- opcode: createNodes
@@ -59,6 +64,7 @@
collectMetrics: true
workloads:
- name: 500Nodes
labels: [fast]
params:
initNodes: 500
initPods: 500
@@ -70,6 +76,7 @@
measurePods: 1000
- name: SchedulingInTreePVs
labels: [performance]
workloadTemplate:
- opcode: createNodes
countParam: $initNodes
@@ -84,6 +91,7 @@
collectMetrics: true
workloads:
- name: 500Nodes
labels: [fast]
params:
initNodes: 500
initPods: 500
@@ -95,6 +103,7 @@
measurePods: 1000
- name: SchedulingMigratedInTreePVs
labels: [performance]
workloadTemplate:
- opcode: createNodes
countParam: $initNodes
@@ -118,6 +127,7 @@
collectMetrics: true
workloads:
- name: 500Nodes
labels: [fast]
params:
initNodes: 500
initPods: 500
@@ -129,6 +139,7 @@
measurePods: 1000
- name: SchedulingCSIPVs
labels: [performance]
workloadTemplate:
- opcode: createNodes
countParam: $initNodes
@@ -150,6 +161,7 @@
collectMetrics: true
workloads:
- name: 500Nodes
labels: [fast]
params:
initNodes: 500
initPods: 500
@@ -161,6 +173,7 @@
measurePods: 1000
- name: SchedulingPodAffinity
labels: [performance]
defaultPodTemplatePath: config/pod-with-pod-affinity.yaml
workloadTemplate:
- opcode: createNodes
@@ -181,6 +194,7 @@
collectMetrics: true
workloads:
- name: 500Nodes
labels: [fast]
params:
initNodes: 500
initPods: 500
@@ -192,6 +206,7 @@
measurePods: 1000
- name: SchedulingPreferredPodAffinity
labels: [performance]
defaultPodTemplatePath: config/pod-with-preferred-pod-affinity.yaml
workloadTemplate:
- opcode: createNodes
@@ -208,6 +223,7 @@
collectMetrics: true
workloads:
- name: 500Nodes
labels: [fast]
params:
initNodes: 500
initPods: 500
@@ -219,6 +235,7 @@
measurePods: 1000
- name: SchedulingPreferredPodAntiAffinity
labels: [performance]
defaultPodTemplatePath: config/pod-with-preferred-pod-affinity.yaml
workloadTemplate:
- opcode: createNodes
@@ -235,6 +252,7 @@
collectMetrics: true
workloads:
- name: 500Nodes
labels: [fast]
params:
initNodes: 500
initPods: 500
@@ -246,6 +264,7 @@
measurePods: 1000
- name: SchedulingNodeAffinity
labels: [performance]
defaultPodTemplatePath: config/pod-with-node-affinity.yaml
workloadTemplate:
- opcode: createNodes
@@ -261,6 +280,7 @@
collectMetrics: true
workloads:
- name: 500Nodes
labels: [fast]
params:
initNodes: 500
initPods: 500
@@ -272,6 +292,7 @@
measurePods: 1000
- name: TopologySpreading
labels: [performance]
workloadTemplate:
- opcode: createNodes
countParam: $initNodes
@@ -288,6 +309,7 @@
collectMetrics: true
workloads:
- name: 500Nodes
labels: [fast]
params:
initNodes: 500
initPods: 1000
@@ -299,6 +321,7 @@
measurePods: 2000
- name: PreferredTopologySpreading
labels: [performance]
workloadTemplate:
- opcode: createNodes
countParam: $initNodes
@@ -315,6 +338,7 @@
collectMetrics: true
workloads:
- name: 500Nodes
labels: [fast]
params:
initNodes: 500
initPods: 1000
@@ -326,6 +350,7 @@
measurePods: 2000
- name: MixedSchedulingBasePod
labels: [performance]
defaultPodTemplatePath: config/pod-default.yaml
workloadTemplate:
- opcode: createNodes
@@ -361,6 +386,7 @@
collectMetrics: true
workloads:
- name: 500Nodes
labels: [fast]
params:
initNodes: 500
initPods: 200
@@ -372,6 +398,7 @@
measurePods: 1000
- name: PreemptionBasic
labels: [performance]
workloadTemplate:
- opcode: createNodes
countParam: $initNodes
@@ -398,6 +425,7 @@
# measurePods: 5000
- name: PreemptionPVs
labels: [performance]
workloadTemplate:
- opcode: createNodes
countParam: $initNodes
@@ -412,6 +440,7 @@
collectMetrics: true
workloads:
- name: 500Nodes
labels: [fast]
params:
initNodes: 500
initPods: 2000
@@ -426,6 +455,7 @@
# measurePods: 5000
- name: Unschedulable
labels: [performance]
workloadTemplate:
- opcode: createNodes
countParam: $initNodes
@@ -439,6 +469,7 @@
collectMetrics: true
workloads:
- name: 500Nodes/200InitPods
labels: [fast]
params:
initNodes: 500
initPods: 200
@@ -472,6 +503,7 @@
collectMetrics: true
workloads:
- name: 1000Nodes
labels: [fast]
params:
initNodes: 1000
measurePods: 1000
@@ -622,6 +654,7 @@
collectMetrics: true
workloads:
- name: 500Nodes
labels: [fast]
params:
taintNodes: 100
normalNodes: 400

@@ -0,0 +1,50 @@
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package benchmark

import "strings"

// enabled checks a label filter that works as in GitHub:
// - empty string means enabled
// - individual labels are comma-separated
// - [+]<label> means the workload must have that label
// - -<label> means the workload must not have that label
func enabled(labelFilter string, labels ...string) bool {
	for _, label := range strings.Split(labelFilter, ",") {
		if label == "" {
			continue
		}
		mustHaveLabel := label[0] != '-'
		if label[0] == '-' || label[0] == '+' {
			label = label[1:]
		}
		haveLabel := containsStr(labels, label)
		if haveLabel != mustHaveLabel {
			return false
		}
	}
	return true
}

func containsStr(hay []string, needle string) bool {
	for _, item := range hay {
		if item == needle {
			return true
		}
	}
	return false
}
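For a quick feel for the semantics, the sketch below shows what `enabled` returns for a
workload whose effective label set is `performance` plus `fast`. This is a hypothetical
illustration, not part of the change; it assumes it lives in the same `benchmark` package,
since `enabled` is unexported. The actual test file that follows covers the same ground
more systematically.

```go
package benchmark

import "testing"

// TestEnabledExamples is an illustrative sketch, not part of this change.
// Each call passes a workload's effective label set (its own labels plus
// those inherited from its test case) together with a filter string.
func TestEnabledExamples(t *testing.T) {
	labels := []string{"performance", "fast"}
	for _, tc := range []struct {
		filter string
		want   bool
	}{
		{"", true},                    // empty filter enables everything
		{"performance", true},         // required label is set
		{"-fast", false},              // "fast" must not be set, but is
		{"+performance,-fast", false}, // "+performance" matches, "-fast" fails
	} {
		if got := enabled(tc.filter, labels...); got != tc.want {
			t.Errorf("enabled(%q, %v) = %v, want %v", tc.filter, labels, got, tc.want)
		}
	}
}
```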

@@ -0,0 +1,71 @@
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package benchmark

import (
	"strings"
	"testing"
)

func TestLabelFilter(t *testing.T) {
	empty := ""
	performance := "performance"
	fastPerformance := "performance,fast"
	notFastPerformance := "+performance,-fast"
	notFast := "-fast"
	testcases := map[string]map[string]bool{
		empty: {
			empty:           true,
			performance:     true,
			fastPerformance: true,
		},
		performance: {
			empty:           false,
			performance:     true,
			fastPerformance: true,
		},
		fastPerformance: {
			empty:           false,
			performance:     false,
			fastPerformance: true,
		},
		notFast: {
			empty:           true,
			performance:     true,
			fastPerformance: false,
		},
		notFastPerformance: {
			empty:           false,
			performance:     true,
			fastPerformance: false,
		},
	}

	for labelFilter, labelResults := range testcases {
		t.Run(labelFilter, func(t *testing.T) {
			for labels, expected := range labelResults {
				t.Run(labels, func(t *testing.T) {
					actual := enabled(labelFilter, strings.Split(labels, ",")...)
					if actual != expected {
						t.Errorf("expected enabled to be %v, got %v", expected, actual)
					}
				})
			}
		})
	}
}

@@ -121,6 +121,8 @@ type testCase struct {
// This path can be overridden in createPodsOp by setting PodTemplatePath.
// Optional
DefaultPodTemplatePath *string
// Labels can be used to enable or disable workloads inside this test case.
Labels []string
}
func (tc *testCase) collectsMetrics() bool {
@@ -151,6 +153,8 @@ type workload struct {
Name string
// Values of parameters used in the workloadTemplate.
Params params
// Labels can be used to enable or disable a workload.
Labels []string
}
type params struct {
@@ -608,6 +612,8 @@ func initTestOutput(tb testing.TB) io.Writer {
return output
}
var perfSchedulingLabelFilter = flag.String("perf-scheduling-label-filter", "performance", "comma-separated list of labels which a testcase must have (no prefix or +) or must not have (-)")
func BenchmarkPerfScheduling(b *testing.B) {
testCases, err := getTestCases(configFile)
if err != nil {
@@ -632,6 +638,10 @@ func BenchmarkPerfScheduling(b *testing.B) {
b.Run(tc.Name, func(b *testing.B) {
for _, w := range tc.Workloads {
b.Run(w.Name, func(b *testing.B) {
if !enabled(*perfSchedulingLabelFilter, append(tc.Labels, w.Labels...)...) {
b.Skipf("disabled by label filter %q", *perfSchedulingLabelFilter)
}
// Ensure that there are no leaked
// goroutines. They could influence
// performance of the next benchmark.