Merge pull request #126911 from macsko/scheduler_perf_throughput_fixes

Fix wrong throughput threshold for one scheduler_perf test case
Kubernetes Prow Robot authored 2024-08-26 18:42:17 +01:00, committed by GitHub
commit 8bbc0636b9
3 changed files with 4 additions and 7 deletions

View File

@@ -158,7 +158,7 @@ the ci-benchmark-scheduler-perf periodic job will fail with an error log such as
 ```
 --- FAIL: BenchmarkPerfScheduling/SchedulingBasic/5000Nodes_10000Pods
 ...
-scheduler_perf.go:1098: ERROR: op 2: expected SchedulingThroughput Average to be higher: got 256.12, want 270
+scheduler_perf.go:1098: ERROR: op 2: SchedulingBasic/5000Nodes_10000Pods/namespace-2: expected SchedulingThroughput Average to be higher: got 256.12, want 270
 ```
 This allows analyzing which workload failed. Make sure that the failure is not an outlier

View File

@@ -818,7 +818,7 @@
       measurePods: 1000
   - name: 5000Nodes_2000Pods
     labels: [performance]
-    threshold: 35
+    threshold: 24
     params:
       initNodes: 6000
       initPodsPerNamespace: 40
@@ -1070,7 +1070,6 @@
       measurePods: 10
       maxClaimsPerNode: 10
   - name: 2000pods_100nodes
-    labels: [performance, fast]
     params:
       # In this testcase, the number of nodes is smaller
       # than the limit for the PodScheduling slices.
@@ -1214,7 +1213,6 @@
       measurePods: 10
       maxClaimsPerNode: 10
   - name: 2000pods_100nodes
-    labels: [performance, fast]
     params:
       nodesWithDRA: 100
       nodesWithoutDRA: 0
@@ -1289,7 +1287,6 @@
       measureClaims: 2 # must be measurePods / 5
       maxClaimsPerNode: 2
   - name: 2000pods_100nodes
-    labels: [performance, fast]
     params:
       nodesWithDRA: 100
       nodesWithoutDRA: 0
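
Each workload entry in this config pairs a `name` and `labels` with an optional `threshold` and a `params` block. As the Go check in the next file shows, the `threshold` is the minimum acceptable `Average` of the `SchedulingThroughput` metric (presumably pods scheduled per second), so lowering it from 35 to 24 relaxes what `5000Nodes_2000Pods` has to sustain. Below is a minimal sketch of how such an entry could be modeled; `workloadEntry` and its fields are hypothetical simplifications, not the real scheduler_perf types, and `sigs.k8s.io/yaml` is used here only as a convenient decoder.

```go
package main

import (
	"fmt"

	"sigs.k8s.io/yaml"
)

// workloadEntry is an illustrative, simplified model of one item under
// `workloads:` in the config above; the real scheduler_perf types differ.
type workloadEntry struct {
	Name      string         `json:"name"`
	Labels    []string       `json:"labels,omitempty"`
	Threshold float64        `json:"threshold,omitempty"` // minimum acceptable average SchedulingThroughput
	Params    map[string]int `json:"params"`
}

func main() {
	doc := []byte(`
- name: 5000Nodes_2000Pods
  labels: [performance]
  threshold: 24
  params:
    initNodes: 6000
    initPodsPerNamespace: 40
`)
	var workloads []workloadEntry
	if err := yaml.Unmarshal(doc, &workloads); err != nil {
		panic(err)
	}
	fmt.Printf("%s must average at least %.0f in SchedulingThroughput\n",
		workloads[0].Name, workloads[0].Threshold)
}
```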

View File

@@ -939,9 +939,9 @@ func compareMetricWithThreshold(items []DataItem, threshold float64, metricSelec
     for _, item := range items {
         if item.Labels["Metric"] == metricSelector.Name && labelsMatch(item.Labels, metricSelector.Labels) && !valueWithinThreshold(item.Data["Average"], threshold, metricSelector.ExpectLower) {
             if metricSelector.ExpectLower {
-                return fmt.Errorf("expected %s Average to be lower: got %f, want %f", metricSelector.Name, item.Data["Average"], threshold)
+                return fmt.Errorf("%s: expected %s Average to be lower: got %f, want %f", item.Labels["Name"], metricSelector.Name, item.Data["Average"], threshold)
             }
-            return fmt.Errorf("expected %s Average to be higher: got %f, want %f", metricSelector.Name, item.Data["Average"], threshold)
+            return fmt.Errorf("%s: expected %s Average to be higher: got %f, want %f", item.Labels["Name"], metricSelector.Name, item.Data["Average"], threshold)
         }
     }
     return nil
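
Neither `valueWithinThreshold` nor `labelsMatch` is shown in this diff. Judging from the error wording, `valueWithinThreshold` presumably treats the threshold as a lower bound on the average unless `ExpectLower` is set; the sketch below captures that assumed behavior and is not the actual Kubernetes implementation.

```go
package main

import "fmt"

// valueWithinThreshold is a guess at the helper used above, inferred from the
// error messages: normally the average must reach the threshold ("to be
// higher"), and with expectLower set it must stay at or below it ("to be lower").
func valueWithinThreshold(value, threshold float64, expectLower bool) bool {
	if expectLower {
		return value <= threshold
	}
	return value >= threshold
}

func main() {
	// Mirrors the README example above: an average of 256.12 against a 270 threshold fails.
	fmt.Println(valueWithinThreshold(256.12, 270, false)) // false
	// 5000Nodes_2000Pods now only needs to reach 24 instead of 35.
	fmt.Println(valueWithinThreshold(28.0, 24, false)) // true
}
```

With `item.Labels["Name"]` added to both messages, a failure now identifies the exact workload and namespace (for example `SchedulingBasic/5000Nodes_10000Pods/namespace-2`) instead of only the metric name.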