mirror of
https://github.com/k3s-io/kubernetes.git
synced 2025-08-02 00:07:50 +00:00
Merge pull request #126911 from macsko/scheduler_perf_throughput_fixes
Fix wrong throughput threshold for one scheduler_perf test case
This commit is contained in:
commit
8bbc0636b9
@@ -158,7 +158,7 @@ the ci-benchmark-scheduler-perf periodic job will fail with an error log such as
|
||||
```
|
||||
--- FAIL: BenchmarkPerfScheduling/SchedulingBasic/5000Nodes_10000Pods
|
||||
...
|
||||
scheduler_perf.go:1098: ERROR: op 2: expected SchedulingThroughput Average to be higher: got 256.12, want 270
|
||||
scheduler_perf.go:1098: ERROR: op 2: SchedulingBasic/5000Nodes_10000Pods/namespace-2: expected SchedulingThroughput Average to be higher: got 256.12, want 270
|
||||
```
|
||||
|
||||
This makes it possible to analyze which workload failed. Make sure that the failure is not an outlier
|
||||
|
@@ -818,7 +818,7 @@
|
||||
measurePods: 1000
|
||||
- name: 5000Nodes_2000Pods
|
||||
labels: [performance]
|
||||
threshold: 35
|
||||
threshold: 24
|
||||
params:
|
||||
initNodes: 6000
|
||||
initPodsPerNamespace: 40
|
||||
@@ -1070,7 +1070,6 @@
|
||||
measurePods: 10
|
||||
maxClaimsPerNode: 10
|
||||
- name: 2000pods_100nodes
|
||||
labels: [performance, fast]
|
||||
params:
|
||||
# In this testcase, the number of nodes is smaller
|
||||
# than the limit for the PodScheduling slices.
|
||||
@@ -1214,7 +1213,6 @@
|
||||
measurePods: 10
|
||||
maxClaimsPerNode: 10
|
||||
- name: 2000pods_100nodes
|
||||
labels: [performance, fast]
|
||||
params:
|
||||
nodesWithDRA: 100
|
||||
nodesWithoutDRA: 0
|
||||
@@ -1289,7 +1287,6 @@
|
||||
measureClaims: 2 # must be measurePods / 5
|
||||
maxClaimsPerNode: 2
|
||||
- name: 2000pods_100nodes
|
||||
labels: [performance, fast]
|
||||
params:
|
||||
nodesWithDRA: 100
|
||||
nodesWithoutDRA: 0
|
||||
|
@@ -939,9 +939,9 @@ func compareMetricWithThreshold(items []DataItem, threshold float64, metricSelec
|
||||
for _, item := range items {
|
||||
if item.Labels["Metric"] == metricSelector.Name && labelsMatch(item.Labels, metricSelector.Labels) && !valueWithinThreshold(item.Data["Average"], threshold, metricSelector.ExpectLower) {
|
||||
if metricSelector.ExpectLower {
|
||||
return fmt.Errorf("expected %s Average to be lower: got %f, want %f", metricSelector.Name, item.Data["Average"], threshold)
|
||||
return fmt.Errorf("%s: expected %s Average to be lower: got %f, want %f", item.Labels["Name"], metricSelector.Name, item.Data["Average"], threshold)
|
||||
}
|
||||
return fmt.Errorf("expected %s Average to be higher: got %f, want %f", metricSelector.Name, item.Data["Average"], threshold)
|
||||
return fmt.Errorf("%s: expected %s Average to be higher: got %f, want %f", item.Labels["Name"], metricSelector.Name, item.Data["Average"], threshold)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
Loading…
Reference in New Issue
Block a user